import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
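
# --------------------------------------------------------------------------
# Added illustration (not part of the original test file): the pattern the
# iterator tests above exercise is how `TextIteratorStreamer` is typically
# consumed in application code. Model name and prompt are placeholders.
#
#   from threading import Thread
#   from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
#
#   tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
#   model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
#   inputs = tokenizer("Hello", return_tensors="pt")
#   streamer = TextIteratorStreamer(tokenizer)
#   thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
#   thread.start()
#   for chunk in streamer:  # yields decoded text pieces as generation progresses
#       print(chunk, end="", flush=True)
#   thread.join()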
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
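
# Note (added for clarity): `_LazyModule` defers the torch/vision imports above
# until an attribute is first accessed, so e.g.
#
#   from transformers.models.yolos import YolosConfig
#
# resolves lazily without importing the heavy modeling code up front. The same
# pattern appears in the other `__init__.py` modules below.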
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
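
# --------------------------------------------------------------------------
# Added illustration (not part of the original test file): the PRK/PLMS loop in
# `full_loop` above mirrors how `PNDMScheduler` drives a diffusion sampling
# loop; `unet` is a placeholder for any noise-prediction model.
#
#   scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(sample, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample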
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
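
# --------------------------------------------------------------------------
# Added usage sketch (not part of the original module; the import path is an
# assumption based on where this tool lives in `transformers`):
#
#   from transformers.tools import TextToSpeechTool
#
#   tool = TextToSpeechTool()
#   speech = tool("Hello world")  # waveform tensor produced by the HiFi-GAN post-processor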
from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException


if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample

            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
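
# --------------------------------------------------------------------------
# Added usage sketch (not part of the original module): the pipeline is
# normally constructed through `transformers.pipeline`; the model name is just
# an example checkpoint with a mask token.
#
#   from transformers import pipeline
#
#   fill_mask = pipeline("fill-mask", model="distilroberta-base")
#   fill_mask("Paris is the <mask> of France.", top_k=3)
#   # -> list of {"score", "token", "token_str", "sequence"} dicts, as built in postprocess()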
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """
    Calculate real power (P, in watts) from apparent power (S, in volt-amperes)
    and power factor: P = S * pf.
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """
    Calculate reactive power (Q, in volt-amperes reactive) from apparent power
    (S) and power factor: Q = S * sqrt(1 - pf^2).
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
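
# Added example (illustrative; values follow directly from P = S * pf and
# Q = S * sqrt(1 - pf^2)):
if __name__ == "__main__":
    apparent = 100.0  # apparent power S, in volt-amperes
    pf = 0.8  # dimensionless power factor
    print(real_power(apparent, pf))  # 80.0 W
    print(reactive_power(apparent, pf))  # ~60.0 VAR, since sqrt(1 - 0.64) = 0.6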
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Prepares a list with a single random PIL image (channels moved to the last axis)
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
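
# --------------------------------------------------------------------------
# Added usage sketch (not part of the original test file): outside of tests the
# processor is normally loaded from the Hub checkpoint used above; `image` is a
# placeholder PIL image.
#
#   from transformers import OwlViTProcessor
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="pt")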
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
    from .deepspeed import (
        DeepSpeedEngineWrapper,
        DeepSpeedOptimizerWrapper,
        DeepSpeedSchedulerWrapper,
        DummyOptim,
        DummyScheduler,
        HfDeepSpeedConfig,
    )

from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
def triangle_number_generator():
    """Yields the sequence of triangle numbers n * (n + 1) / 2."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Counts the divisors of n via its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Returns the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
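
# Added sanity check (illustrative): 28 = 2^2 * 7 has (2+1)(1+1) = 6 divisors
# and is the first triangle number with more than five divisors.
if __name__ == "__main__":
    assert count_divisors(28) == 6
    assert next(i for i in triangle_number_generator() if count_divisors(i) > 5) == 28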
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """
    Doolittle LU decomposition: factor a square matrix `table` into a lower
    triangular matrix with unit diagonal and an upper triangular matrix.
    """
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
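
# Added example (illustrative): decompose a small matrix and verify that
# lower @ upper reconstructs it.
if __name__ == "__main__":
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)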
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
import numpy
import onnx
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
__snake_case : List[Any] = a.name
__snake_case : List[str] = b.name
__snake_case : str = """"""
__snake_case : int = """"""
__snake_case : Tuple = a == b
__snake_case : Tuple = name_a
__snake_case : str = name_b
return res
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_graph_replace_input_with(node_proto.attribute[1].g , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
for n in graph_proto.node:
_node_replace_input_with(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
__snake_case : Union[str, Any] = list(model.graph.initializer )
__snake_case : str = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
__snake_case : Tuple = inits[i].name
__snake_case : Dict = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
__snake_case : Dict = os.path.dirname(__SCREAMING_SNAKE_CASE )
__snake_case : Any = os.path.basename(__SCREAMING_SNAKE_CASE )
__snake_case : Optional[int] = onnx.load(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__snake_case : Dict = list(model.graph.initializer )
__snake_case : List[Any] = set()
__snake_case : Tuple = {}
__snake_case : List[str] = []
__snake_case : Union[str, Any] = 0
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(__SCREAMING_SNAKE_CASE ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(__SCREAMING_SNAKE_CASE )
dup_set.add(__SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = inits[j].data_type
__snake_case : int = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 1_1:
mem_size *= 8
else:
print("""unexpected data type: """ , __SCREAMING_SNAKE_CASE )
total_reduced_size += mem_size
__snake_case : str = inits[i].name
__snake_case : Dict = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(__SCREAMING_SNAKE_CASE )
else:
__snake_case : Optional[int] = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , """GB""" )
__snake_case : Optional[Any] = sorted(__SCREAMING_SNAKE_CASE )
_remove_dup_initializers_from_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case : List[str] = """optimized_""" + model_file_name
__snake_case : List[str] = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
onnx.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return new_model
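
# Added usage sketch (illustrative; the file name is a placeholder): point the
# helper at an exported ONNX file and it writes `optimized_<name>` next to it.
#
#   optimized_path = remove_dup_initializers("model.onnx")
#   print("optimized model written to", optimized_path)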
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare and swap two elements according to the sort direction (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence of the given length into sorted order."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort `length` elements starting at `low`: ascending if direction is 1, descending if 0."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    """Look up an image processor class by its (string) class name."""
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
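# A minimal usage sketch (hedged): resolving "CLIPImageProcessor" walks
# IMAGE_PROCESSOR_MAPPING_NAMES, imports `transformers.models.clip`, and
# returns the class, falling back to the main `transformers` init (which may
# hold a dummy object when a dependency is missing) or None:
#
#     clip_cls = image_processor_class_from_name("CLIPImageProcessor")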
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the image processor configuration of a pretrained model as a dict ({} if it cannot be located)."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info("Could not locate the image processor configuration file, will try to use the model config instead.")
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
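# Example (hedged sketch; the checkpoint name is illustrative): fetch just the
# image processor configuration as a plain dict, without instantiating anything.
# The empty-dict fallback above lets callers retry with the model config.
#
#     config_dict = get_image_processor_config("facebook/detr-resnet-50")
#     image_processor_class = config_dict.get("image_processor_type")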
class AutoImageProcessor:
    def __init__(self):
raise EnvironmentError(
"""AutoImageProcessor is designed to be instantiated """
"""using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor
        # config and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for a given config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
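# Typical usage (a short, hedged sketch; the checkpoint name is illustrative):
#
#     from transformers import AutoImageProcessor
#
#     image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#
# New processors can be made discoverable with
# `AutoImageProcessor.register(CustomConfig, CustomImageProcessor)`.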
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    text_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    text_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    input_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(input_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
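# Hedged sketch of the magic-number idea the test above relies on: treat a file
# as a ZIP only if it *starts* with a ZIP signature, unlike zipfile.is_zipfile,
# which scans for an end-of-central-directory record anywhere in the file.
#
#     ZIP_MAGIC_NUMBERS = [b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08"]
#
#     def starts_like_zip(path) -> bool:
#         with open(path, "rb") as f:
#             head = f.read(4)
#         return any(head.startswith(magic) for magic in ZIP_MAGIC_NUMBERS)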
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """Return the minimum cost of a path from the top-left to the bottom-right
    corner of the grid, moving only right or down (updates the grid in place)."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
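    # Hedged worked example for the function above: the grid
    # [[1, 3, 1], [1, 5, 1], [4, 2, 1]] has minimal path cost 7 (1 -> 3 -> 1 -> 1 -> 1).
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7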
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def snake_case__ ( self : Any ):
__snake_case : Dict = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__snake_case : str = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
__snake_case : List[str] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__snake_case : str = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_60_00,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__snake_case : Optional[Any] = tempfile.mkdtemp()
__snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__snake_case : Any = os.path.join(self.tmpdirname , _lowerCAmelCase )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
# load decoder from hub
__snake_case : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def snake_case__ ( self : Optional[Any] , **_lowerCAmelCase : Tuple ):
__snake_case : int = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] , **_lowerCAmelCase : Optional[int] ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case__ ( self : Dict , **_lowerCAmelCase : Tuple ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_lowerCAmelCase )
def snake_case__ ( self : List[str] ):
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Tuple = self.get_feature_extractor()
__snake_case : Dict = self.get_decoder()
__snake_case : List[str] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
__snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _lowerCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _lowerCAmelCase )
def snake_case__ ( self : Tuple ):
__snake_case : Tuple = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def snake_case__ ( self : int ):
__snake_case : Tuple = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(_lowerCAmelCase , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def snake_case__ ( self : Dict ):
__snake_case : int = self.get_feature_extractor()
__snake_case : str = self.get_tokenizer()
__snake_case : Dict = self.get_decoder()
__snake_case : Any = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : List[Any] = floats_list((3, 10_00) )
__snake_case : Optional[Any] = feature_extractor(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : Tuple = processor(_lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case__ ( self : Optional[int] ):
__snake_case : Any = self.get_feature_extractor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Optional[int] = """This is a test string"""
__snake_case : Union[str, Any] = processor(text=_lowerCAmelCase )
__snake_case : Dict = tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case__ ( self : List[str] , _lowerCAmelCase : List[Any]=(2, 10, 16) , _lowerCAmelCase : str=77 ):
np.random.seed(_lowerCAmelCase )
return np.random.rand(*_lowerCAmelCase )
def snake_case__ ( self : Tuple ):
__snake_case : List[str] = self.get_feature_extractor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : List[str] = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Optional[int] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case : int = processor.decode(_lowerCAmelCase )
__snake_case : Optional[int] = decoder.decode_beams(_lowerCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def snake_case__ ( self : List[str] , _lowerCAmelCase : List[str] ):
__snake_case : int = self.get_feature_extractor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : int = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case : Tuple = processor.batch_decode(_lowerCAmelCase )
else:
with get_context(_lowerCAmelCase ).Pool() as pool:
__snake_case : int = processor.batch_decode(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : int = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as p:
__snake_case : Tuple = decoder.decode_beams_batch(_lowerCAmelCase , _lowerCAmelCase )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCAmelCase , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(_lowerCAmelCase , decoded_processor.logit_score )
self.assertListEqual(_lowerCAmelCase , decoded_processor.lm_score )
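        # Hedged recap of the two call styles exercised above:
        #     processor.batch_decode(logits)            # processor manages its own pool
        #     with get_context("fork").Pool() as pool:
        #         processor.batch_decode(logits, pool)  # caller-supplied pool, created
        #                                               # *after* the processor/LM exists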
def snake_case__ ( self : Optional[int] ):
__snake_case : Optional[Any] = self.get_feature_extractor()
__snake_case : int = self.get_tokenizer()
__snake_case : str = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : int = self._get_dummy_logits()
__snake_case : List[str] = 15
__snake_case : Optional[Any] = -20.0
__snake_case : Tuple = -4.0
__snake_case : List[Any] = processor.batch_decode(
_lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
__snake_case : List[str] = decoded_processor_out.text
__snake_case : str = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as pool:
__snake_case : Dict = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
__snake_case : int = [d[0][0] for d in decoded_decoder_out]
__snake_case : List[Any] = [d[0][2] for d in decoded_decoder_out]
__snake_case : List[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _lowerCAmelCase )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _lowerCAmelCase , atol=1e-3 ) )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _lowerCAmelCase , atol=1e-3 ) )
def snake_case__ ( self : Any ):
__snake_case : List[Any] = self.get_feature_extractor()
__snake_case : Any = self.get_tokenizer()
__snake_case : Union[str, Any] = self.get_decoder()
__snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Any = self._get_dummy_logits()
__snake_case : Any = 2.0
__snake_case : int = 5.0
__snake_case : Optional[int] = -20.0
__snake_case : Optional[int] = True
__snake_case : Any = processor.batch_decode(
_lowerCAmelCase , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
__snake_case : str = decoded_processor_out.text
__snake_case : int = list(_lowerCAmelCase )
decoder.reset_params(
alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
with get_context("""fork""" ).Pool() as pool:
__snake_case : Tuple = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , )
__snake_case : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _lowerCAmelCase )
__snake_case : List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _lowerCAmelCase )
def snake_case__ ( self : Dict ):
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : Union[str, Any] = os.listdir(_lowerCAmelCase )
__snake_case : List[str] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Union[str, Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(_lowerCAmelCase )
__snake_case : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : List[str] = os.listdir(_lowerCAmelCase )
__snake_case : List[Any] = os.listdir(_lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : str = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = floats_list((3, 10_00) )
__snake_case : Union[str, Any] = processor_wavaveca(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : Union[str, Any] = processor_auto(_lowerCAmelCase , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case : Dict = self._get_dummy_logits()
__snake_case : List[Any] = processor_wavaveca.batch_decode(_lowerCAmelCase )
__snake_case : List[Any] = processor_auto.batch_decode(_lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def snake_case__ ( self : str ):
__snake_case : int = self.get_feature_extractor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : Optional[Any] = self.get_decoder()
__snake_case : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def snake_case__ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [d[key] for d in offsets]
return retrieved_list
def snake_case__ ( self : Dict ):
__snake_case : int = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : List[str] = self._get_dummy_logits()[0]
__snake_case : str = processor.decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def snake_case__ ( self : List[str] ):
__snake_case : Any = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = self._get_dummy_logits()
__snake_case : int = processor.batch_decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def snake_case__ ( self : Optional[Any] ):
import torch
__snake_case : Optional[Any] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_lowerCAmelCase )
__snake_case : Any = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
__snake_case : List[Any] = iter(_lowerCAmelCase )
__snake_case : Optional[int] = next(_lowerCAmelCase )
__snake_case : str = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__snake_case : str = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case : List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__snake_case : Dict = model(_lowerCAmelCase ).logits.cpu().numpy()
__snake_case : Any = processor.decode(logits[0] , output_word_offsets=_lowerCAmelCase )
__snake_case : Optional[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case : Dict = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__snake_case : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , _lowerCAmelCase )
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , output.text )
# output times
__snake_case : Dict = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """start_time""" ) )
__snake_case : Optional[Any] = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """end_time""" ) )
# fmt: off
__snake_case : Optional[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__snake_case : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves so that every node holds exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
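    # Hedged sanity check using the reconstructed names above: a root holding
    # 3 coins with two empty leaf children needs exactly 2 moves (one coin
    # pushed down each edge).
    assert distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2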
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny", "roberta", checkpoint_path, precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion, fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
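# Illustration (hedged) of the renaming above: "text_branch.sequential.3.weight"
# first becomes "text_model.sequential.3.weight" via KEYS_TO_MODIFY_MAPPING, then
# the sequential pattern rewrites it to "text_model.layers.1.linear.weight",
# since layer index 3 // 3 == 1 in the assumed HF checkpoint layout.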
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
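# Example invocation (hedged; the script name and paths are placeholders):
#
#     python convert_clap_original_pytorch_to_hf.py \
#         --checkpoint_path /path/to/clap_checkpoint.pt \
#         --pytorch_dump_folder_path ./clap-hf \
#         --enable_fusion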
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
TGT = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
    ]
    tgt = [
        """Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
        """ the final seconds on board Flight 9525.""",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
        """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    tgt = [
        """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
PRETRAINED_INIT_CONFIGURATION = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    # Ernie-M model doesn't have token_type embedding.
    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
def __init__( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Dict=False , _lowerCAmelCase : List[Any]="utf8" , _lowerCAmelCase : Optional[Any]="[UNK]" , _lowerCAmelCase : Optional[int]="[SEP]" , _lowerCAmelCase : List[str]="[PAD]" , _lowerCAmelCase : Dict="[CLS]" , _lowerCAmelCase : List[Any]="[MASK]" , _lowerCAmelCase : Optional[Dict[str, Any]] = None , **_lowerCAmelCase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , vocab_file=_lowerCAmelCase , encoding=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
__snake_case : List[Any] = do_lower_case
__snake_case : Any = sentencepiece_model_ckpt
__snake_case : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
__snake_case : int = self.load_vocab(filepath=_lowerCAmelCase )
else:
__snake_case : Tuple = {self.sp_model.id_to_piece(_lowerCAmelCase ): id for id in range(self.sp_model.get_piece_size() )}
__snake_case : str = {v: k for k, v in self.vocab.items()}
def snake_case__ ( self : List[Any] , _lowerCAmelCase : List[Any] ):
if text is None:
return None
__snake_case : List[Any] = self.tokenize(_lowerCAmelCase )
__snake_case , __snake_case : Optional[Any] = """""", []
for i, ch in enumerate(_lowerCAmelCase ):
if ch in self.SP_CHAR_MAPPING:
__snake_case : Any = self.SP_CHAR_MAPPING.get(_lowerCAmelCase )
else:
__snake_case : Dict = unicodedata.normalize("""NFKC""" , _lowerCAmelCase )
if self.is_whitespace(_lowerCAmelCase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_lowerCAmelCase ) )
__snake_case , __snake_case , __snake_case : str = normalized_text, [], 0
if self.do_lower_case:
__snake_case : int = text.lower()
for token in split_tokens:
if token[:1] == "▁":
__snake_case : int = token[1:]
__snake_case : Optional[int] = text[offset:].index(_lowerCAmelCase ) + offset
__snake_case : int = start + len(_lowerCAmelCase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
__snake_case : str = end
return token_mapping
@property
def snake_case__ ( self : Tuple ):
return len(self.vocab )
def snake_case__ ( self : Dict ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : int ):
__snake_case : str = self.__dict__.copy()
__snake_case : Union[str, Any] = None
return state
def __setstate__( self : Optional[Any] , _lowerCAmelCase : List[str] ):
__snake_case : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__snake_case : Optional[Any] = {}
__snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def snake_case__ ( self : str , _lowerCAmelCase : Optional[int] ):
return "".join((self.SP_CHAR_MAPPING.get(_lowerCAmelCase , _lowerCAmelCase ) for c in text) )
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Optional[Any]=64 , _lowerCAmelCase : Optional[int]=0.1 ):
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
__snake_case : List[str] = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
__snake_case : Dict = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
__snake_case : List[Any] = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
__snake_case : str = self.sp_model.EncodeAsPieces(_lowerCAmelCase )
else:
__snake_case : Tuple = self.sp_model.SampleEncodeAsPieces(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__snake_case : Union[str, Any] = []
for pi, piece in enumerate(_lowerCAmelCase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_lowerCAmelCase ) and pi != 0:
new_pieces.append(_lowerCAmelCase )
continue
else:
continue
__snake_case : Optional[int] = 0
for i, chunk in enumerate(_lowerCAmelCase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_lowerCAmelCase ) or self.is_punct(_lowerCAmelCase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_lowerCAmelCase )
__snake_case : Tuple = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__snake_case : List[str] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__snake_case : Tuple = i
if len(_lowerCAmelCase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : int ):
__snake_case : int = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase , """ """ ).strip()
return out_string
def snake_case__ ( self : Dict , _lowerCAmelCase : Tuple ):
__snake_case : int = self.convert_ids_to_tokens(_lowerCAmelCase )
__snake_case : Any = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase , """ """ ).strip()
return out_string
def snake_case__ ( self : Dict , _lowerCAmelCase : Tuple ):
return self.vocab.get(_lowerCAmelCase , self.vocab.get(self.unk_token ) )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Dict ):
return self.reverse_vocab.get(_lowerCAmelCase , self.unk_token )
def snake_case__ ( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case : Union[str, Any] = [self.cls_token_id]
__snake_case : Optional[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def snake_case__ ( self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[Any]=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_lowerCAmelCase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_lowerCAmelCase ) + 1) + [1] * (len(_lowerCAmelCase ) + 3)
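        # Worked example (hedged): with len(A) == 2 and len(B) == 3 the layout
        # [CLS] A [SEP] [SEP] B [SEP] yields [0, 0, 0] + [1, 1, 1, 1, 1, 1],
        # i.e. len(A) + 1 zeros followed by len(B) + 3 ones.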
def snake_case__ ( self : Tuple , _lowerCAmelCase : List[str] ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def snake_case__ ( self : List[str] , _lowerCAmelCase : Any ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def snake_case__ ( self : int , _lowerCAmelCase : List[Any] ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def snake_case__ ( self : str , _lowerCAmelCase : Optional[Any] ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_lowerCAmelCase ) == 1:
__snake_case : Dict = unicodedata.category(_lowerCAmelCase )
if cat == "Zs":
return True
return False
    def snake_case__ ( self : str , filepath : List[Any] ):
        token_to_idx = {}
        with io.open(filepath , """r""" , encoding="""utf-8""" ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip("""\n""" )
                token_to_idx[token] = int(index )
        return token_to_idx
    def snake_case__ ( self : Union[str, Any] , save_directory : str , filename_prefix : Optional[str] = None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        else:
            vocab_file = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        """ Please check that the vocabulary is not corrupted!""" )
                    index = token_index
                writer.write(token + """\n""" )
                index += 1
        tokenizer_model_file = os.path.join(save_directory , """sentencepiece.bpe.model""" )
        with open(tokenizer_model_file , """wb""" ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
        return (vocab_file,)
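    # A minimal usage sketch for the writer above (assuming the method is exposed
    # as `save_vocabulary`, which the surrounding code does not confirm):
    #   vocab_files = tokenizer.save_vocabulary("./exported")  # -> ("./exported/vocab.txt",)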
| 20
| 0
|
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowercase_ = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
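# Each MODEL_CLASSES entry maps a shortcut name to
# (config_class, TF model class(es), PyTorch model class(es), pretrained archive map/list);
# the converter functions below unpack these tuples.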
def convert_pt_checkpoint_to_tf ( model_type , pytorch_checkpoint_path , config_file , tf_dump_path , compare_with_pt_model=False , use_cached_models=True ):
    '''simple docstring'''
    if model_type not in MODEL_CLASSES:
        raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
    config_class , model_class , pt_model_class , aws_config_map = MODEL_CLASSES[model_type]
    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file , CONFIG_NAME , force_download=not use_cached_models )
    config = config_class.from_json_file(config_file )
    config.output_hidden_states = True
    config.output_attentions = True
    print(F'''Building TensorFlow model from configuration: {config}''' )
    tf_model = model_class(config )
    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path , WEIGHTS_NAME , force_download=not use_cached_models )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model , pytorch_checkpoint_path )
    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs , training=False )  # build the network
        state_dict = torch.load(pytorch_checkpoint_path , map_location="""cpu""" )
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None , config=config , state_dict=state_dict )
        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs )
        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf ) )
        print(F'''Max absolute difference between models outputs {diff}''' )
        assert diff <= 2E-2, F'''Error, model absolute difference is >2e-2: {diff}'''
    # Save pytorch-model
    print(F'''Save TensorFlow model to {tf_dump_path}''' )
    tf_model.save_weights(tf_dump_path , save_format="""h5""" )
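# A minimal sketch of a single conversion (shortcut names are illustrative):
#   convert_pt_checkpoint_to_tf(
#       model_type="bert",
#       pytorch_checkpoint_path="bert-base-uncased",
#       config_file="bert-base-uncased",
#       tf_dump_path="./bert-tf_model.h5",
#       compare_with_pt_model=True,
#   )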
def convert_all_pt_checkpoints_to_tf ( args_model_type , tf_dump_path , model_shortcut_names_or_path=None , config_shortcut_names_or_path=None , compare_with_pt_model=False , use_cached_models=False , remove_cached_files=False , only_convert_finetuned_models=False , ):
    '''simple docstring'''
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys() )
    else:
        model_types = [args_model_type]
    for j, model_type in enumerate(model_types , start=1 ):
        print("""=""" * 1_0_0 )
        print(F''' Converting model type {j}/{len(model_types )}: {model_type}''' )
        print("""=""" * 1_0_0 )
        if model_type not in MODEL_CLASSES:
            raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
        config_class , model_class , pt_model_class , aws_model_maps , aws_config_map = MODEL_CLASSES[model_type]
        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys() )
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path
        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path , config_shortcut_names_or_path ) , start=1 ):
            print("""-""" * 1_0_0 )
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' )
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(F''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
                continue
            print(
                F''' Converting checkpoint {i}/{len(model_shortcut_names_or_path )}: {model_shortcut_name} - model_type {model_type}''' )
            print("""-""" * 1_0_0 )
            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name , CONFIG_NAME , force_download=not use_cached_models )
            else:
                config_file = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name , WEIGHTS_NAME , force_download=not use_cached_models )
            else:
                model_file = model_shortcut_name
            if os.path.isfile(model_shortcut_name ):
                model_shortcut_name = """converted_model"""
            convert_pt_checkpoint_to_tf(
                model_type=model_type , pytorch_checkpoint_path=model_file , config_file=config_file , tf_dump_path=os.path.join(tf_dump_path , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=compare_with_pt_model , )
            if remove_cached_files:
                os.remove(config_file )
                os.remove(model_file )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
lowercase_ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 366
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[str] = "xlm"
A : List[str] = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
    def __init__( self , vocab_size=3_01_45 , emb_dim=20_48 , n_layers=12 , n_heads=16 , dropout=0.1 , attention_dropout=0.1 , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=1 , use_lang_emb=True , max_position_embeddings=5_12 , embed_init_std=20_48**-0.5 , layer_norm_eps=1e-12 , init_std=0.02 , bos_index=0 , eos_index=1 , pad_index=2 , unk_index=3 , mask_index=5 , is_encoder=True , summary_type="first" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , start_n_top=5 , end_n_top=5 , mask_token_id=0 , lang_id=0 , pad_token_id=2 , bos_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["""n_words"""]
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , **kwargs )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
@property
def snake_case__ ( self : Dict ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
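    # Marking the batch/choice/sequence axes as dynamic lets the exported ONNX
    # graph accept variable batch sizes and sequence lengths at inference time.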
| 20
| 0
|
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def snake_case__ ( *_lowerCAmelCase : Tuple , **_lowerCAmelCase : str ):
pass
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def snake_case__ ( self : Union[str, Any] , model , tokenizer , processor ):
        vqa_pipeline = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
        examples = [
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
    def snake_case__ ( self : Optional[Any] , vqa_pipeline , examples ):
        outputs = vqa_pipeline(examples , top_k=1 )
        self.assertEqual(
            outputs , [
                [{"""score""": ANY(float ), """answer""": ANY(str )}],
                [{"""score""": ANY(float ), """answer""": ANY(str )}],
            ] , )
@require_torch
def snake_case__ ( self : List[str] ):
        vqa_pipeline = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
        image = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        question = """How many cats are there?"""
        outputs = vqa_pipeline(image=image , question="""How many cats are there?""" , top_k=2 )
        self.assertEqual(
            outputs , [{"""score""": ANY(float ), """answer""": ANY(str )}, {"""score""": ANY(float ), """answer""": ANY(str )}] )
        outputs = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
        self.assertEqual(
            outputs , [{"""score""": ANY(float ), """answer""": ANY(str )}, {"""score""": ANY(float ), """answer""": ANY(str )}] )
@slow
@require_torch
def snake_case__ ( self : str ):
        vqa_pipeline = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" )
        image = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        question = """How many cats are there?"""
        outputs = vqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )
        outputs = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )
        outputs = vqa_pipeline(
            [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
def snake_case__ ( self : Any ):
pass
| 367
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[Any] = "encodec"
def __init__( self : Tuple , _lowerCAmelCase : Union[str, Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , _lowerCAmelCase : Tuple=2_40_00 , _lowerCAmelCase : List[Any]=1 , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : int=1_28 , _lowerCAmelCase : List[Any]=32 , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : Union[str, Any]=[8, 5, 4, 2] , _lowerCAmelCase : str="weight_norm" , _lowerCAmelCase : Tuple=7 , _lowerCAmelCase : str=7 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : int=2 , _lowerCAmelCase : str=True , _lowerCAmelCase : Dict="reflect" , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : int=1.0 , _lowerCAmelCase : Optional[int]=10_24 , _lowerCAmelCase : int=None , _lowerCAmelCase : List[str]=True , **_lowerCAmelCase : List[Any] , ):
__snake_case : Optional[int] = target_bandwidths
__snake_case : int = sampling_rate
__snake_case : List[Any] = audio_channels
__snake_case : str = normalize
__snake_case : Union[str, Any] = chunk_length_s
__snake_case : Union[str, Any] = overlap
__snake_case : Union[str, Any] = hidden_size
__snake_case : Union[str, Any] = num_filters
__snake_case : Optional[Any] = num_residual_layers
__snake_case : List[Any] = upsampling_ratios
__snake_case : List[str] = norm_type
__snake_case : Union[str, Any] = kernel_size
__snake_case : Optional[int] = last_kernel_size
__snake_case : Optional[Any] = residual_kernel_size
__snake_case : Dict = dilation_growth_rate
__snake_case : int = use_causal_conv
__snake_case : Tuple = pad_mode
__snake_case : str = compress
__snake_case : Optional[Any] = num_lstm_layers
__snake_case : List[Any] = trim_right_ratio
__snake_case : Any = codebook_size
__snake_case : int = codebook_dim if codebook_dim is not None else hidden_size
__snake_case : int = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}''' )
super().__init__(**_lowerCAmelCase )
@property
def snake_case__ ( self : int ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def snake_case__ ( self : int ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def snake_case__ ( self : Union[str, Any] ):
        hop_length = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def snake_case__ ( self : Tuple ):
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
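    # Example with the defaults above: upsampling_ratios [8, 5, 4, 2] give
    # hop_length = 8 * 5 * 4 * 2 = 320, frame_rate = ceil(24000 / 320) = 75, and the
    # top bandwidth of 24.0 kbps yields int(1000 * 24.0 // (75 * 10)) = 32 codebooks.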
| 20
| 0
|
def greatest_common_divisor ( a : int , b : int ):
    '''simple docstring'''
    return abs(b ) if a == 0 else greatest_common_divisor(b % a , a )
def gcd_by_iterative ( x : int , y : int ):
    '''simple docstring'''
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x , y = y, x % y
    return abs(x )
def main ( ):
    '''simple docstring'''
    try:
        nums = input("""Enter two integers separated by comma (,): """ ).split(""",""" )
        num_1 = int(nums[0] )
        num_2 = int(nums[1] )
        print(
            F'''greatest_common_divisor({num_1}, {num_2}) = '''
            F'''{greatest_common_divisor(num_1 , num_2 )}''' )
        print(F'''By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1 , num_2 )}''' )
    except (IndexError, UnboundLocalError, ValueError):
        print("""Wrong input""" )
if __name__ == "__main__":
    main()
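# Illustrative checks (added; not part of the original module):
#   greatest_common_divisor(24, 40) -> 8
#   gcd_by_iterative(24, 40)        -> 8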
| 368
|
from __future__ import annotations
def merge ( input_list : list , low : int , mid : int , high : int ):
    '''simple docstring'''
    result = []
    left , right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0 ) )
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort ( input_list : list ):
    '''simple docstring'''
    if len(input_list ) <= 1:
        return input_list
    input_list = list(input_list )
    # iteration for two-way merging
    p = 2
    while p <= len(input_list ):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0 , len(input_list ) , p ):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list , low , mid , high )
        # final merge of last two parts
        if p * 2 >= len(input_list ):
            mid = i
            input_list = merge(input_list , 0 , mid , len(input_list ) - 1 )
            break
        p *= 2
    return input_list
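# Illustrative check (added; not part of the original module):
#   iter_merge_sort([4, 1, 3, 2]) -> [1, 2, 3, 4]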
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
if user_input == "":
lowercase_ = []
else:
lowercase_ = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
| 20
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Tuple = "visual_bert"
def __init__( self : List[str] , _lowerCAmelCase : Dict=3_05_22 , _lowerCAmelCase : Any=7_68 , _lowerCAmelCase : int=5_12 , _lowerCAmelCase : Union[str, Any]=12 , _lowerCAmelCase : Tuple=12 , _lowerCAmelCase : Tuple=30_72 , _lowerCAmelCase : str="gelu" , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Dict=5_12 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Optional[int]=0.02 , _lowerCAmelCase : Union[str, Any]=1e-12 , _lowerCAmelCase : int=False , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : str=0 , _lowerCAmelCase : Tuple=2 , **_lowerCAmelCase : Any , ):
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
__snake_case : Optional[Any] = vocab_size
__snake_case : List[str] = max_position_embeddings
__snake_case : str = hidden_size
__snake_case : int = visual_embedding_dim
__snake_case : Any = num_hidden_layers
__snake_case : Union[str, Any] = num_attention_heads
__snake_case : Dict = intermediate_size
__snake_case : Any = hidden_act
__snake_case : Optional[int] = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Optional[Any] = initializer_range
__snake_case : Tuple = type_vocab_size
__snake_case : int = layer_norm_eps
__snake_case : List[str] = bypass_transformer
__snake_case : Union[str, Any] = special_visual_initialize
| 369
|
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowercase_ = "Usage of script: script_name <size_of_canvas:int>"
lowercase_ = [0] * 1_00 + [1] * 10
random.shuffle(choice)
def create_canvas ( size : int ):
    '''simple docstring'''
    canvas = [[False for i in range(size )] for j in range(size )]
    return canvas
def seed ( canvas : list[list[bool]] ):
    '''simple docstring'''
    for i, row in enumerate(canvas ):
        for j, _ in enumerate(row ):
            canvas[i][j] = bool(random.getrandbits(1 ) )
def run ( canvas : list[list[bool]] ):
    '''simple docstring'''
    current_canvas = np.array(canvas )
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0] ) )
    for r, row in enumerate(current_canvas ):
        for c, pt in enumerate(row ):
            next_gen_canvas[r][c] = __judge_point(
                pt , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas : list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point ( pt : bool , neighbours : list[list[bool]] ):
    '''simple docstring'''
    alive = 0
    dead = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
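# The rules applied above are Conway's Game of Life: a live cell survives with
# two or three live neighbours and dies otherwise; a dead cell with exactly
# three live neighbours becomes alive.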
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig , ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 20
| 0
|
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
def snake_case__ ( self : Union[str, Any] ):
super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : Optional[int] ):
__snake_case : Union[str, Any] = """UNwant\u00E9d,running"""
__snake_case : Optional[int] = """unwanted, running"""
return input_text, output_text
def snake_case__ ( self : List[Any] ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(tokens , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
def snake_case__ ( self : Tuple ):
        tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def snake_case__ ( self : Optional[Any] ):
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def snake_case__ ( self : Optional[int] ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def snake_case__ ( self : Union[str, Any] ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def snake_case__ ( self : Optional[int] ):
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def snake_case__ ( self : Union[str, Any] ):
        tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def snake_case__ ( self : int ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def snake_case__ ( self : int ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def snake_case__ ( self : Any ):
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def snake_case__ ( self : str ):
__snake_case : Union[str, Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__snake_case : List[str] = {}
for i, token in enumerate(_lowerCAmelCase ):
__snake_case : int = i
__snake_case : Any = WordpieceTokenizer(vocab=_lowerCAmelCase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
@require_torch
def snake_case__ ( self : List[str] ):
        tokenizer = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        expected_src_tokens = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
        batch = tokenizer(src_text , padding=True , return_tensors="""pt""" )
        self.assertIsInstance(batch , BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens , result )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def snake_case__ ( self : int ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def snake_case__ ( self : Any ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def snake_case__ ( self : int ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
@slow
def snake_case__ ( self : Dict ):
        tokenizer = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
| 370
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate ( *args , take_from : Optional[Union[Dict, Any]] = None , standard_warn=True , stacklevel=2 ):
    '''simple docstring'''
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                F''' version {__version__} is >= {version_name}''' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
        if warning is not None:
            warning = warning + """ """ if standard_warn else """"""
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key , value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
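# A minimal usage sketch (argument values are illustrative, not from this module):
#   deprecate("old_kwarg", "0.30.0", "Use `new_kwarg` instead.", take_from=kwargs)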
| 20
| 0
|
def hex_to_bin ( hex_num : str ):
    '''simple docstring'''
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("""No value was passed to the function""" )
    is_negative = hex_num[0] == """-"""
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 1_6 )
    except ValueError:
        raise ValueError("""Invalid value was passed to the function""" )
    bin_str = """"""
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(("""-""" + bin_str) if is_negative else bin_str )
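# Illustrative checks (added; not part of the original module):
#   hex_to_bin("AC")  -> 10101100
#   hex_to_bin("-66") -> -1100110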
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371
|
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowercase_ = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=True ):
'''simple docstring'''
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
__snake_case , __snake_case , __snake_case , __snake_case : Any = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__snake_case : int = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
__snake_case : Dict = config_class.from_json_file(__SCREAMING_SNAKE_CASE )
__snake_case : Tuple = True
__snake_case : Union[str, Any] = True
print(F'''Building TensorFlow model from configuration: {config}''' )
__snake_case : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
__snake_case : Optional[Any] = cached_file(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__snake_case : List[Any] = load_pytorch_checkpoint_in_tfa_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if compare_with_pt_model:
__snake_case : Tuple = tf_model(tf_model.dummy_inputs , training=__SCREAMING_SNAKE_CASE ) # build the network
__snake_case : List[str] = torch.load(__SCREAMING_SNAKE_CASE , map_location="""cpu""" )
__snake_case : Any = pt_model_class.from_pretrained(
pretrained_model_name_or_path=__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE , state_dict=__SCREAMING_SNAKE_CASE )
with torch.no_grad():
__snake_case : Union[str, Any] = pt_model(**pt_model.dummy_inputs )
__snake_case : Any = pto[0].numpy()
__snake_case : Optional[int] = tfo[0].numpy()
__snake_case : Optional[int] = np.amax(np.abs(np_pt - np_tf ) )
print(F'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2E-2, F'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(F'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(__SCREAMING_SNAKE_CASE , save_format="""h5""" )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Any=False , ):
'''simple docstring'''
if args_model_type is None:
__snake_case : Tuple = list(MODEL_CLASSES.keys() )
else:
__snake_case : Union[str, Any] = [args_model_type]
for j, model_type in enumerate(__SCREAMING_SNAKE_CASE , start=1 ):
print("""=""" * 1_0_0 )
print(F''' Converting model type {j}/{len(__SCREAMING_SNAKE_CASE )}: {model_type}''' )
print("""=""" * 1_0_0 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[int] = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__snake_case : int = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__snake_case : Union[str, Any] = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , start=1 ):
print("""-""" * 1_0_0 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
__snake_case : List[Any] = model_shortcut_name
elif only_convert_finetuned_models:
print(F''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
F''' Converting checkpoint {i}/{len(__SCREAMING_SNAKE_CASE )}: {model_shortcut_name} - model_type {model_type}''' )
print("""-""" * 1_0_0 )
if config_shortcut_name in aws_config_map:
__snake_case : int = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
__snake_case : Dict = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__snake_case : Union[str, Any] = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
__snake_case : List[Any] = model_shortcut_name
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
__snake_case : List[str] = """converted_model"""
convert_pt_checkpoint_to_tf(
model_type=__SCREAMING_SNAKE_CASE , pytorch_checkpoint_path=__SCREAMING_SNAKE_CASE , config_file=__SCREAMING_SNAKE_CASE , tf_dump_path=os.path.join(__SCREAMING_SNAKE_CASE , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=__SCREAMING_SNAKE_CASE , )
if remove_cached_files:
os.remove(__SCREAMING_SNAKE_CASE )
os.remove(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
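

# Usage sketch (illustrative, not part of the original script; the model type and
# shortcut name below are hypothetical choices):
#
#   convert_all_pt_checkpoints_to_tf(
#       "bert",
#       "/tmp/tf_dumps",
#       model_shortcut_names_or_path=["bert-base-uncased"],
#       compare_with_pt_model=True,
#   )
#
# or, assuming the script file is named convert_pytorch_checkpoint_to_tf2.py, the
# equivalent CLI call:
#
#   python convert_pytorch_checkpoint_to_tf2.py --tf_dump_path /tmp/tf_dumps \
#       --model_type bert --pytorch_checkpoint_path bert-base-uncased --compare_with_pt_model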
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
                 hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
                 initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3,
                 qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
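

# Usage sketch (illustrative, not part of the original file): the defaults above
# reproduce ViT-Base/16; other variants override the vision hyper-parameters.
#
#   config = ViTConfig(image_size=384, patch_size=32)  # hypothetical values
#   assert config.num_hidden_layers == 12              # default from __init__ above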
import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition of ``data`` around ``pivot``."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the ``index``-th smallest element of ``items`` (0-based)."""
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
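

# Quick sanity check (added for illustration, not in the original module):
# sorted([2, 4, 5, 7, 899, 54, 32]) is [2, 4, 5, 7, 32, 54, 899], so the element
# at index 5 is 54; the median of an odd-length list is quick_select(items, len(items) // 2).
#
#   assert quick_select([2, 4, 5, 7, 899, 54, 32], 5) == 54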
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
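

# Illustration of what the toy fixture above encodes (not part of the test):
# with the vocab/merges written in setUp, "lower" BPE-splits into
# ["low", "er</w>"] (ids 14 and 15) and out-of-vocabulary tokens fall back to
# "<unk>" (id 20), which is exactly what test_full_tokenizer asserts:
#
#   tokenizer = BioGptTokenizer(vocab_file, merges_file)
#   tokenizer.tokenize("lower")                                  # -> ["low", "er</w>"]
#   tokenizer.convert_tokens_to_ids(["low", "er</w>", "<unk>"])  # -> [14, 15, 20]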
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
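

# Usage sketch (illustrative; assumes network access to a checkpoint listed in
# the pretrained map above):
#
#   from transformers import RoFormerTokenizerFast
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tokenizer.tokenize("今天天气非常好。")  # jieba-based pre-tokenization splits on Chinese words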
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)
if __name__ == "__main__":
main()
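

# Worked example (added for illustration): with key "KEY", "HELLO" encrypts to
# "RIJVS" (H+K=R, E+E=I, L+Y=J, L+K=V, O+E=S, all mod 26), and decrypting with
# the same key round-trips:
#
#   assert encrypt_message("KEY", "HELLO") == "RIJVS"
#   assert decrypt_message("KEY", "RIJVS") == "HELLO"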
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value the maximizing player can guarantee on a full binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
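
# Worked example (added for illustration): for the scores above the tree has
# height 3; the minimizer level reduces the maximized leaf pairs to
# min(max(90, 23), max(6, 33)) = 33 and min(max(21, 65), max(123, 34423)) = 65,
# so the maximizer's optimal value printed by main() is 65.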
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links to the main docs with stable links in the README's model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
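

# Illustration (not part of the script): how one REPLACE_PATTERNS entry rewrites
# a version string.
#
#   pattern, replacement = REPLACE_PATTERNS["init"]
#   pattern.sub(replacement.replace("VERSION", "4.27.0"), '__version__ = "4.27.0.dev0"\n')
#   # the line becomes: __version__ = "4.27.0"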
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowercase_ = None
lowercase_ = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
lowercase_ = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
def __call__( self : Any ):
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
            if is_local_path(path):
                image = PIL.Image.open(path)
            else:
                source_url = path.split("::")[-1]
                try:
                    repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                    use_auth_token = token_per_repo_id.get(repo_id)
                except ValueError:
                    use_auth_token = None
                with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                    bytes_ = BytesIO(f.read())
                image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
image.load() # to avoid "Too many open files" errors
return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> list:
    """List the PIL formats that can be both opened and saved."""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image to bytes, using the image's own format when supported, else PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    """Encode a PIL Image as a dict with "path" and "bytes" fields."""
if hasattr(__SCREAMING_SNAKE_CASE , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__SCREAMING_SNAKE_CASE )}
def encode_np_array(array: np.ndarray) -> dict:
    """Encode a numpy array as a dict with "path" and "bytes" fields, downcasting the dtype if needed."""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype = np.dtype(dtype_byteorder + dtype_kind + str(dtype_itemsize))
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
        )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    """Encode a list of objects into a format suitable for creating an Image array."""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
else:
return objs
else:
return objs
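

# Usage sketch (illustrative; the path below is hypothetical):
#
#   from datasets import Dataset, Image
#   ds = Dataset.from_dict({"image": ["path/to/image.png"]}).cast_column("image", Image())
#   ds[0]["image"]  # lazily decoded into a PIL.Image.Image via decode_example above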
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10,
                 hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True,
                 hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"],
                 out_indices=[2, 3, 4], num_groups=1):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
def snake_case__ ( self : List[str] ):
__snake_case : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Tuple = None
if self.use_labels:
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : List[Any] = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : Optional[int] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def snake_case__ ( self : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] ):
__snake_case : Any = BitModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : List[Any] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] ):
__snake_case : Dict = self.num_labels
__snake_case : List[str] = BitForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : Dict = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ):
__snake_case : List[str] = BitBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : List[str] = model(_lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__snake_case : Dict = None
__snake_case : int = BitBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : List[Any] = model(_lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case__ ( self : int ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def snake_case__ ( self : Optional[int] ):
__snake_case : Any = BitModelTester(self )
__snake_case : Optional[Any] = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def snake_case__ ( self : Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : Union[str, Any] ):
return
@unittest.skip(reason="""Bit does not output attentions""" )
def snake_case__ ( self : Optional[int] ):
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def snake_case__ ( self : Dict ):
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def snake_case__ ( self : Dict ):
pass
def snake_case__ ( self : Optional[Any] ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def snake_case__ ( self : Optional[Any] ):
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCAmelCase )
def snake_case__ ( self : Tuple ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
def snake_case__ ( self : Dict ):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def snake_case__ ( self : int ):
pass
def snake_case__ ( self : Any ):
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def snake_case__ ( self : int ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Union[str, Any] = BitModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def snake_case__ ( self : Tuple ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def snake_case__ ( self : str ):
__snake_case : List[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCAmelCase )
__snake_case : Tuple = self.default_image_processor
__snake_case : List[str] = prepare_img()
__snake_case : List[Any] = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : int = model(**_lowerCAmelCase )
# verify the logits
__snake_case : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__snake_case : Dict = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False
def snake_case__ ( self : Union[str, Any] ):
__snake_case : int = BitModelTester(self )
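

# Inference sketch (illustrative; mirrors the integration test above, using the
# first archive-list checkpoint):
#
#   processor = BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
#   model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
#   logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits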
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs
    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
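

# Usage sketch (illustrative; this class backs the "image-to-text" pipeline task):
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text")  # or pass model=... to pick a checkpoint
#   captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
#   # -> [{"generated_text": "..."}]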
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
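

# Note (added for illustration): the shuffle is in place and also returns the
# same list object, so seeding the RNG makes runs reproducible:
#
#   random.seed(0)
#   fisher_yates_shuffle([1, 2, 3, 4])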
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
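
# Illustration (not part of the module): with the lazy structure above,
# `from transformers import YolosConfig` resolves immediately, while accessing
# `YolosForObjectDetection` triggers the torch-backed import only on first use.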
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
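

# Usage sketch (illustrative; instantiating the tool downloads the SpeechT5
# checkpoints on first use):
#
#   tool = TextToSpeechTool()
#   waveform = tool("Hello, world")  # 1-D tensor of audio samples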
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict: dict, config: ViTMAEConfig) -> dict:
    """Split fused qkv projections and rename all remaining keys."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url: str, pytorch_dump_folder_path: str):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
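# Example invocation (script and output paths are illustrative):
#
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_large.pth \
#       --pytorch_dump_folder_path ./vit-mae-large
#
# "large" / "huge" in the URL select the matching config branch above.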
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Real (active) power in watts: P = S * cos(phi), where cos(phi) is the power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Reactive power in volt-amperes reactive: Q = S * sqrt(1 - cos^2(phi))."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
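if __name__ == "__main__":
    # Illustrative check (editor-added): at 100 VA apparent power and a power
    # factor of 0.9, P = 100 * 0.9 = 90 W and Q = 100 * sqrt(1 - 0.81) ~ 43.59 var.
    assert real_power(100, 0.9) == 90.0
    assert abs(reactive_power(100, 0.9) - 43.588989435406736) < 1e-9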
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity * n-factor, with molarity = moles / volume (litres)."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
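if __name__ == "__main__":
    # Illustrative checks (editor-added), using R = 0.0821 L*atm/(mol*K):
    assert moles_to_pressure(0.82, 3, 300) == 90  # P = nRT / V
    assert moles_to_volume(0.82, 3, 300) == 90  # V = nRT / P
    assert pressure_and_volume_to_temperature(0.82, 1, 100) == 999  # T = PV / (nR)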
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
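# A minimal sketch of one utility re-exported here (decorator API as documented
# by accelerate; the loop body is a placeholder):
#
#   from accelerate.utils import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_loop(batch_size):
#       ...  # build dataloaders with `batch_size`; on CUDA OOM the wrapper
#            # retries the function with a halved batch size
#
#   training_loop()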
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
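# A minimal predictor-corrector sampling sketch with this scheduler (model name
# and tensor shapes are hypothetical; it mirrors the structure of diffusers'
# ScoreSdeVePipeline):
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps)
#   scheduler.set_sigmas(num_inference_steps)
#   sample = torch.randn(batch, channels, height, width) * scheduler.init_noise_sigma
#   for i, t in enumerate(scheduler.timesteps):
#       sigma_t = scheduler.sigmas[i] * torch.ones(batch)
#       for _ in range(scheduler.config.correct_steps):
#           score = model(sample, sigma_t).sample
#           sample = scheduler.step_correct(score, sample).prev_sample
#       score = model(sample, sigma_t).sample
#       out = scheduler.step_pred(score, t, sample)
#       sample, sample_mean = out.prev_sample, out.prev_sample_mean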
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
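# Net effect of the structure above: importing this package stays cheap, and
# heavy backends load lazily, e.g. accessing BlenderbotSmallForConditionalGeneration
# only triggers the torch-gated modeling_blenderbot_small import on first use.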
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowercase_ = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
lowercase_ = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
lowercase_ = r"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
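# Usage, matching the examples in _KWARGS_DESCRIPTION above:
#
#   metric = datasets.load_metric("competition_math")
#   results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
#   print(results)  # {'accuracy': 1.0}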
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["ViTFeatureExtractor"]
lowercase_ = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum total cost to travel on every day in `days`, given 1-day, 7-day and
    30-day pass prices in `costs` (the classic minimum-cost-tickets problem)."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
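if __name__ == "__main__":
    # Illustrative check (editor-added): with travel days [1, 4, 6, 7, 8, 20] and
    # pass costs [2, 7, 15], the optimum is a 1-day pass on day 1, a 7-day pass
    # covering days 4-8, and a 1-day pass on day 20: 2 + 7 + 2 = 11.
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11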
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they are out of order for `direction`
    (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence of `length` elements starting at `low`."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort `length` elements starting at `low`; `length` must be a power of two."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
lowercase_ = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
"""simple docstring"""
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
Extractor,
GzipExtractor,
    Lz4Extractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    text_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    text_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
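# Why the PNG above fools `zipfile.is_zipfile`: its IDAT payload happens to
# contain the bytes "PK\x05\x06", the ZIP end-of-central-directory signature,
# which `zipfile` scans for near the end of a file. Checking only the leading
# magic number ("PK\x03\x04" for a local file header) avoids this class of
# false positive.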
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput
if is_torch_available():
    from transformers import Wav2Vec2ForCTC
@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")
        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)
    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)
        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )
        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_decoder_tokenizer_mismatch_raises_error(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits(shape=(10, 16), seed=13)
        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]
        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)
        logits_list = list(logits)
        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])
        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0
        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text
        logits_list = list(logits)
        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )
        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)
        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))
        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))
def snake_case__ ( self : Any ):
__snake_case : List[Any] = self.get_feature_extractor()
__snake_case : Any = self.get_tokenizer()
__snake_case : Union[str, Any] = self.get_decoder()
__snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Any = self._get_dummy_logits()
__snake_case : Any = 2.0
__snake_case : int = 5.0
__snake_case : Optional[int] = -20.0
__snake_case : Optional[int] = True
__snake_case : Any = processor.batch_decode(
_lowerCAmelCase , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
__snake_case : str = decoded_processor_out.text
__snake_case : int = list(_lowerCAmelCase )
decoder.reset_params(
alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
with get_context("""fork""" ).Pool() as pool:
__snake_case : Tuple = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , )
__snake_case : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _lowerCAmelCase )
__snake_case : List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _lowerCAmelCase )
def snake_case__ ( self : Dict ):
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : Union[str, Any] = os.listdir(_lowerCAmelCase )
__snake_case : List[str] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Union[str, Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(_lowerCAmelCase )
__snake_case : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : List[str] = os.listdir(_lowerCAmelCase )
__snake_case : List[Any] = os.listdir(_lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : str = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = floats_list((3, 10_00) )
__snake_case : Union[str, Any] = processor_wavaveca(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : Union[str, Any] = processor_auto(_lowerCAmelCase , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case : Dict = self._get_dummy_logits()
__snake_case : List[Any] = processor_wavaveca.batch_decode(_lowerCAmelCase )
__snake_case : List[Any] = processor_auto.batch_decode(_lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def snake_case__ ( self : str ):
__snake_case : int = self.get_feature_extractor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : Optional[Any] = self.get_decoder()
__snake_case : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def snake_case__ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [d[key] for d in offsets]
return retrieved_list
def snake_case__ ( self : Dict ):
__snake_case : int = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : List[str] = self._get_dummy_logits()[0]
__snake_case : str = processor.decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def snake_case__ ( self : List[str] ):
__snake_case : Any = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = self._get_dummy_logits()
__snake_case : int = processor.batch_decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def snake_case__ ( self : Optional[Any] ):
import torch
__snake_case : Optional[Any] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_lowerCAmelCase )
__snake_case : Any = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
__snake_case : List[Any] = iter(_lowerCAmelCase )
__snake_case : Optional[int] = next(_lowerCAmelCase )
__snake_case : str = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__snake_case : str = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case : List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__snake_case : Dict = model(_lowerCAmelCase ).logits.cpu().numpy()
__snake_case : Any = processor.decode(logits[0] , output_word_offsets=_lowerCAmelCase )
__snake_case : Optional[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case : Dict = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__snake_case : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , _lowerCAmelCase )
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , output.text )
# output times
__snake_case : Dict = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """start_time""" ) )
__snake_case : Optional[Any] = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """end_time""" ) )
# fmt: off
__snake_case : Optional[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__snake_case : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
| 20
| 0
|
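# A minimal usage sketch of the pool behaviour the tests above exercise: the
# multiprocessing pool must be created *after* Wav2Vec2ProcessorWithLM so the
# kenlm model is already loaded when the pool forks and is therefore visible
# to the sub-processes. Hedged sketch: assumes `pyctcdecode` and `kenlm` are
# installed; the checkpoint name and the random logits are illustrative only.
import numpy as np
from multiprocessing import get_context
from transformers import Wav2Vec2ProcessorWithLM

def decode_with_user_managed_pool():
    processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    logits = np.random.rand(2, 10, len(processor.tokenizer))  # (batch, time, vocab)
    # instantiate the pool only *after* the processor, as the test comment warns
    with get_context("fork").Pool() as pool:
        return processor.batch_decode(logits, pool).text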
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 364
|
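# The file above registers its submodules through the library's `_LazyModule`
# so that importing the package does not eagerly pull in torch-heavy code. A
# minimal sketch of the same idea using PEP 562 module-level `__getattr__`
# (hedged: this is the generic pattern, meant to live in a package's
# `__init__.py`, not the actual `_LazyModule` implementation; the map is illustrative):
import importlib

_LAZY_ATTRS = {
    "TrajectoryTransformerConfig": ".configuration_trajectory_transformer",
    "TrajectoryTransformerModel": ".modeling_trajectory_transformer",
}

def __getattr__(name):
    # resolve the owning submodule only on first attribute access
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")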
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowercase_ = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
lowercase_ = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=False ):
'''simple docstring'''
__snake_case , __snake_case : str = create_model(
"""HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__snake_case : Union[str, Any] = {}
__snake_case : List[Any] = R""".*sequential.(\d+).*"""
__snake_case : Union[str, Any] = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__snake_case : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# replace sequential layers with list
__snake_case : Optional[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 )
__snake_case : Dict = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.''' )
elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case : str = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
            __snake_case : List[Any] = 1 if projection_layer == 0 else 2
            __snake_case : Tuple = key.replace(F'''_projection.{projection_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
        if "audio" in key and "qkv" in key:
# split qkv into query key and value
__snake_case : Optional[int] = value
__snake_case : Any = mixed_qkv.size(0 ) // 3
__snake_case : List[Any] = mixed_qkv[:qkv_dim]
__snake_case : Tuple = mixed_qkv[qkv_dim : qkv_dim * 2]
__snake_case : List[Any] = mixed_qkv[qkv_dim * 2 :]
__snake_case : Any = query_layer
__snake_case : Dict = key_layer
__snake_case : Optional[Any] = value_layer
else:
__snake_case : List[str] = value
return model_state_dict
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False ):
'''simple docstring'''
__snake_case , __snake_case : List[str] = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE )
clap_model.eval()
__snake_case : Tuple = clap_model.state_dict()
__snake_case : Union[str, Any] = rename_state_dict(__SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = ClapConfig()
__snake_case : Tuple = enable_fusion
__snake_case : Any = ClapModel(__SCREAMING_SNAKE_CASE )
# ignore the spectrogram embedding layer
model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
lowercase_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 20
| 0
|
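# The conversion above splits CLAP's fused `qkv` projection into separate
# query / key / value tensors by slicing thirds along dim 0. A small
# self-contained check of exactly that slicing logic (the tensor values are
# synthetic; only the shapes and the slice arithmetic matter):
import torch

mixed_qkv = torch.arange(6 * 4, dtype=torch.float32).reshape(6, 4)  # (3 * dim, hidden)
qkv_dim = mixed_qkv.size(0) // 3
query_layer = mixed_qkv[:qkv_dim]
key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
value_layer = mixed_qkv[qkv_dim * 2 :]
assert torch.equal(torch.cat([query_layer, key_layer, value_layer], dim=0), mixed_qkv)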
lowercase_ = [
(10_00, "M"),
(9_00, "CM"),
(5_00, "D"),
(4_00, "CD"),
(1_00, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
__snake_case : int = {"""I""": 1, """V""": 5, """X""": 1_0, """L""": 5_0, """C""": 1_0_0, """D""": 5_0_0, """M""": 1_0_0_0}
__snake_case : Tuple = 0
__snake_case : List[Any] = 0
while place < len(__SCREAMING_SNAKE_CASE ):
if (place + 1 < len(__SCREAMING_SNAKE_CASE )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__snake_case : Optional[Any] = []
for arabic, roman in ROMAN:
        __snake_case , __snake_case : List[str] = divmod(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
result.append(roman * factor )
if number == 0:
break
return "".join(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365
|
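# A compact, runnable restatement of the int -> roman conversion above: a
# greedy divmod walk over the (value, symbol) pairs of the ROMAN table
# (hedged: `int_to_roman` is an illustrative name; the obfuscated file above
# performs the same steps):
ROMAN_PAIRS = [(1000, "M"), (900, "CM"), (500, "D"), (400, "CD"), (100, "C"),
               (90, "XC"), (50, "L"), (40, "XL"), (10, "X"), (9, "IX"),
               (5, "V"), (4, "IV"), (1, "I")]

def int_to_roman(number: int) -> str:
    result = []
    for arabic, symbol in ROMAN_PAIRS:
        factor, number = divmod(number, arabic)
        result.append(symbol * factor)
        if number == 0:
            break
    return "".join(result)

assert int_to_roman(2024) == "MMXXIV"  # 2 x M, 2 x X, then the subtractive IV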
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = "▁"
lowercase_ = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
lowercase_ = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
lowercase_ = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
lowercase_ = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
lowercase_ = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[str] = ["input_ids"]
A : Tuple = VOCAB_FILES_NAMES
A : List[Any] = PRETRAINED_INIT_CONFIGURATION
A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = RESOURCE_FILES_NAMES
def __init__( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Dict=False , _lowerCAmelCase : List[Any]="utf8" , _lowerCAmelCase : Optional[Any]="[UNK]" , _lowerCAmelCase : Optional[int]="[SEP]" , _lowerCAmelCase : List[str]="[PAD]" , _lowerCAmelCase : Dict="[CLS]" , _lowerCAmelCase : List[Any]="[MASK]" , _lowerCAmelCase : Optional[Dict[str, Any]] = None , **_lowerCAmelCase : Any , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , vocab_file=_lowerCAmelCase , encoding=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
__snake_case : List[Any] = do_lower_case
__snake_case : Any = sentencepiece_model_ckpt
__snake_case : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
__snake_case : int = self.load_vocab(filepath=_lowerCAmelCase )
else:
__snake_case : Tuple = {self.sp_model.id_to_piece(_lowerCAmelCase ): id for id in range(self.sp_model.get_piece_size() )}
__snake_case : str = {v: k for k, v in self.vocab.items()}
def snake_case__ ( self : List[Any] , _lowerCAmelCase : List[Any] ):
if text is None:
return None
__snake_case : List[Any] = self.tokenize(_lowerCAmelCase )
__snake_case , __snake_case : Optional[Any] = """""", []
for i, ch in enumerate(_lowerCAmelCase ):
if ch in self.SP_CHAR_MAPPING:
__snake_case : Any = self.SP_CHAR_MAPPING.get(_lowerCAmelCase )
else:
__snake_case : Dict = unicodedata.normalize("""NFKC""" , _lowerCAmelCase )
if self.is_whitespace(_lowerCAmelCase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_lowerCAmelCase ) )
__snake_case , __snake_case , __snake_case : str = normalized_text, [], 0
if self.do_lower_case:
__snake_case : int = text.lower()
for token in split_tokens:
if token[:1] == "▁":
__snake_case : int = token[1:]
__snake_case : Optional[int] = text[offset:].index(_lowerCAmelCase ) + offset
__snake_case : int = start + len(_lowerCAmelCase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
__snake_case : str = end
return token_mapping
@property
def snake_case__ ( self : Tuple ):
return len(self.vocab )
def snake_case__ ( self : Dict ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : int ):
__snake_case : str = self.__dict__.copy()
__snake_case : Union[str, Any] = None
return state
def __setstate__( self : Optional[Any] , _lowerCAmelCase : List[str] ):
__snake_case : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__snake_case : Optional[Any] = {}
__snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def snake_case__ ( self : str , _lowerCAmelCase : Optional[int] ):
return "".join((self.SP_CHAR_MAPPING.get(_lowerCAmelCase , _lowerCAmelCase ) for c in text) )
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Optional[Any]=64 , _lowerCAmelCase : Optional[int]=0.1 ):
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
__snake_case : List[str] = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
__snake_case : Dict = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
__snake_case : List[Any] = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
__snake_case : str = self.sp_model.EncodeAsPieces(_lowerCAmelCase )
else:
__snake_case : Tuple = self.sp_model.SampleEncodeAsPieces(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__snake_case : Union[str, Any] = []
for pi, piece in enumerate(_lowerCAmelCase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_lowerCAmelCase ) and pi != 0:
new_pieces.append(_lowerCAmelCase )
continue
else:
continue
__snake_case : Optional[int] = 0
for i, chunk in enumerate(_lowerCAmelCase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_lowerCAmelCase ) or self.is_punct(_lowerCAmelCase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_lowerCAmelCase )
__snake_case : Tuple = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__snake_case : List[str] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__snake_case : Tuple = i
if len(_lowerCAmelCase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : int ):
__snake_case : int = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase , """ """ ).strip()
return out_string
def snake_case__ ( self : Dict , _lowerCAmelCase : Tuple ):
__snake_case : int = self.convert_ids_to_tokens(_lowerCAmelCase )
__snake_case : Any = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase , """ """ ).strip()
return out_string
def snake_case__ ( self : Dict , _lowerCAmelCase : Tuple ):
return self.vocab.get(_lowerCAmelCase , self.vocab.get(self.unk_token ) )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Dict ):
return self.reverse_vocab.get(_lowerCAmelCase , self.unk_token )
def snake_case__ ( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case : Union[str, Any] = [self.cls_token_id]
__snake_case : Optional[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def snake_case__ ( self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[Any]=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_lowerCAmelCase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_lowerCAmelCase ) + 1) + [1] * (len(_lowerCAmelCase ) + 3)
def snake_case__ ( self : Tuple , _lowerCAmelCase : List[str] ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def snake_case__ ( self : List[str] , _lowerCAmelCase : Any ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def snake_case__ ( self : int , _lowerCAmelCase : List[Any] ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def snake_case__ ( self : str , _lowerCAmelCase : Optional[Any] ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_lowerCAmelCase ) == 1:
__snake_case : Dict = unicodedata.category(_lowerCAmelCase )
if cat == "Zs":
return True
return False
def snake_case__ ( self : str , _lowerCAmelCase : List[Any] ):
__snake_case : Dict = {}
with io.open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(_lowerCAmelCase ):
__snake_case : Tuple = line.rstrip("""\n""" )
__snake_case : List[str] = int(_lowerCAmelCase )
return token_to_idx
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
__snake_case : Optional[int] = 0
if os.path.isdir(_lowerCAmelCase ):
__snake_case : int = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
__snake_case : Optional[Any] = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
__snake_case : Union[str, Any] = token_index
writer.write(token + """\n""" )
index += 1
__snake_case : List[Any] = os.path.join(_lowerCAmelCase , """sentencepiece.bpe.model""" )
with open(_lowerCAmelCase , """wb""" ) as fi:
__snake_case : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (vocab_file,)
| 20
| 0
|
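# The ErnieM tokenizer above lays out a sequence pair as
# [CLS] A [SEP] [SEP] B [SEP], with token type ids of 0 over "[CLS] A" and 1
# over the trailing "[SEP] [SEP] B [SEP]". A tiny standalone check of those
# two layout formulas (the token ids are dummy values, not real vocabulary entries):
token_ids_a = [11, 12, 13]
token_ids_b = [21, 22]
cls_id, sep_id = 0, 2

pair_input = [cls_id] + token_ids_a + [sep_id, sep_id] + token_ids_b + [sep_id]
token_type_ids = [0] * (len(token_ids_a) + 1) + [1] * (len(token_ids_b) + 3)

assert len(pair_input) == len(token_type_ids) == len(token_ids_a) + len(token_ids_b) + 4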
from __future__ import annotations
from typing import TypedDict
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : str
A : int
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise TypeError("""The parameter s type must be str.""" )
return [s[i:] + s[:i] for i in range(len(__SCREAMING_SNAKE_CASE ) )]
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise TypeError("""The parameter s type must be str.""" )
if not s:
raise ValueError("""The parameter s must not be empty.""" )
__snake_case : Dict = all_rotations(__SCREAMING_SNAKE_CASE )
    rotations.sort() # sort the list of rotations in alphabetical order
# make a string composed of the last char of each rotation
__snake_case : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__SCREAMING_SNAKE_CASE ),
}
return response
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise TypeError("""The parameter bwt_string type must be str.""" )
if not bwt_string:
raise ValueError("""The parameter bwt_string must not be empty.""" )
try:
__snake_case : int = int(__SCREAMING_SNAKE_CASE )
except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or"""
            """ castable to int.""" )
if idx_original_string < 0:
raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
if idx_original_string >= len(__SCREAMING_SNAKE_CASE ):
raise ValueError(
"""The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
__snake_case : str = [""""""] * len(__SCREAMING_SNAKE_CASE )
for _ in range(len(__SCREAMING_SNAKE_CASE ) ):
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
__snake_case : Any = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
lowercase_ = "Provide a string that I will generate its BWT transform: "
lowercase_ = input(entry_msg).strip()
lowercase_ = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result["bwt_string"]}\''''
)
lowercase_ = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '''
F'''we get original string \'{original_string}\''''
)
| 366
|
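# A worked example of the Burrows-Wheeler transform defined above, small
# enough to verify by hand: the six rotations of "banana" sort to
# [abanan, anaban, ananab, banana, nabana, nanaba], so the last column spells
# "nnbaaa" and the original string sits at sorted index 3. Standalone sketch
# (hedged: `bwt` is an illustrative condensation of `bwt_transform` above):
def bwt(s: str) -> tuple:
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    return "".join(word[-1] for word in rotations), rotations.index(s)

assert bwt("banana") == ("nnbaaa", 3)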
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[str] = "xlm"
A : List[str] = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
def __init__( self : List[Any] , _lowerCAmelCase : Optional[Any]=3_01_45 , _lowerCAmelCase : Optional[Any]=20_48 , _lowerCAmelCase : Dict=12 , _lowerCAmelCase : int=16 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : str=True , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Dict=False , _lowerCAmelCase : Dict=1 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Optional[Any]=5_12 , _lowerCAmelCase : List[Any]=20_48**-0.5 , _lowerCAmelCase : List[str]=1e-12 , _lowerCAmelCase : List[Any]=0.02 , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : Dict=2 , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Tuple="first" , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : List[str]=5 , _lowerCAmelCase : Optional[Any]=0 , _lowerCAmelCase : Tuple=0 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Union[str, Any]=0 , **_lowerCAmelCase : Tuple , ):
__snake_case : Optional[Any] = vocab_size
__snake_case : Tuple = emb_dim
__snake_case : int = n_layers
__snake_case : List[str] = n_heads
__snake_case : Union[str, Any] = dropout
__snake_case : Optional[int] = attention_dropout
__snake_case : Optional[Any] = gelu_activation
__snake_case : Tuple = sinusoidal_embeddings
__snake_case : List[Any] = causal
__snake_case : Dict = asm
__snake_case : int = n_langs
__snake_case : str = use_lang_emb
__snake_case : Dict = layer_norm_eps
__snake_case : List[Any] = bos_index
__snake_case : Union[str, Any] = eos_index
__snake_case : Dict = pad_index
__snake_case : Any = unk_index
__snake_case : Dict = mask_index
__snake_case : Any = is_encoder
__snake_case : Dict = max_position_embeddings
__snake_case : Optional[Any] = embed_init_std
__snake_case : List[Any] = init_std
__snake_case : str = summary_type
__snake_case : Optional[Any] = summary_use_proj
__snake_case : str = summary_activation
__snake_case : Optional[int] = summary_proj_to_labels
__snake_case : Dict = summary_first_dropout
__snake_case : Dict = start_n_top
__snake_case : int = end_n_top
__snake_case : str = mask_token_id
__snake_case : int = lang_id
if "n_words" in kwargs:
__snake_case : Dict = kwargs["""n_words"""]
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
@property
def snake_case__ ( self : Dict ):
if self.task == "multiple-choice":
__snake_case : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__snake_case : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 20
| 0
|
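# XLMConfig above relies on `attribute_map` so that generic names such as
# `hidden_size` transparently resolve to the model-specific field `emb_dim`.
# A minimal sketch of that aliasing mechanism (hedged: the real
# PretrainedConfig implementation also remaps attribute *writes* and handles
# serialization; this only shows the read-side redirection):
class AliasedConfig:
    attribute_map = {"hidden_size": "emb_dim", "num_hidden_layers": "n_layers"}

    def __init__(self, emb_dim=2048, n_layers=12):
        self.emb_dim = emb_dim
        self.n_layers = n_layers

    def __getattribute__(self, name):
        # redirect aliased names to their model-specific counterparts
        if name != "attribute_map" and name in type(self).attribute_map:
            name = type(self).attribute_map[name]
        return super().__getattribute__(name)

config = AliasedConfig()
assert config.hidden_size == config.emb_dim == 2048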
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowercase_ = "Usage of script: script_name <size_of_canvas:int>"
lowercase_ = [0] * 1_00 + [1] * 10
random.shuffle(choice)
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__snake_case : List[str] = [[False for i in range(__SCREAMING_SNAKE_CASE )] for j in range(__SCREAMING_SNAKE_CASE )]
return canvas
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list[list[bool]] ):
'''simple docstring'''
for i, row in enumerate(__SCREAMING_SNAKE_CASE ):
for j, _ in enumerate(__SCREAMING_SNAKE_CASE ):
__snake_case : int = bool(random.getrandbits(1 ) )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list[list[bool]] ):
'''simple docstring'''
__snake_case : Union[str, Any] = np.array(__SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__SCREAMING_SNAKE_CASE ):
for c, pt in enumerate(__SCREAMING_SNAKE_CASE ):
__snake_case : Optional[Any] = __judge_point(
__SCREAMING_SNAKE_CASE , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
__snake_case : List[str] = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
__snake_case : list[list[bool]] = current_canvas.tolist()
return return_canvas
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : list[list[bool]] ):
'''simple docstring'''
__snake_case : Any = 0
__snake_case : Dict = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
__snake_case : str = pt
if pt:
if alive < 2:
__snake_case : Optional[Any] = False
elif alive == 2 or alive == 3:
__snake_case : Union[str, Any] = True
elif alive > 3:
__snake_case : Optional[int] = False
else:
if alive == 3:
__snake_case : List[Any] = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowercase_ = int(sys.argv[1])
# main working structure of this module.
lowercase_ = create_canvas(canvas_size)
seed(c)
lowercase_ , lowercase_ = plt.subplots()
fig.show()
lowercase_ = ListedColormap(["w", "k"])
try:
while True:
lowercase_ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 367
|
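# The `__judge_point` rules above (survive with 2-3 live neighbours, be born
# with exactly 3) can also be applied to the whole canvas at once. A vectorized
# numpy sketch (hedged: this variant uses toroidal wrap-around edges via
# np.roll, whereas the slicing version above treats the border as dead):
import numpy as np

def life_step(grid: np.ndarray) -> np.ndarray:
    # count the eight neighbours of every cell in one shot
    neighbours = sum(
        np.roll(np.roll(grid, dy, axis=0), dx, axis=1)
        for dy in (-1, 0, 1)
        for dx in (-1, 0, 1)
        if (dy, dx) != (0, 0)
    )
    # live cells keep living with 2 or 3 neighbours; dead cells are born with 3
    return (neighbours == 3) | (grid & (neighbours == 2))

blinker = np.zeros((5, 5), dtype=bool)
blinker[2, 1:4] = True                   # a horizontal bar...
assert life_step(blinker)[1:4, 2].all()  # ...oscillates into a vertical bar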
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[Any] = "encodec"
def __init__( self : Tuple , _lowerCAmelCase : Union[str, Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , _lowerCAmelCase : Tuple=2_40_00 , _lowerCAmelCase : List[Any]=1 , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : int=1_28 , _lowerCAmelCase : List[Any]=32 , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : Union[str, Any]=[8, 5, 4, 2] , _lowerCAmelCase : str="weight_norm" , _lowerCAmelCase : Tuple=7 , _lowerCAmelCase : str=7 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : int=2 , _lowerCAmelCase : str=True , _lowerCAmelCase : Dict="reflect" , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : int=1.0 , _lowerCAmelCase : Optional[int]=10_24 , _lowerCAmelCase : int=None , _lowerCAmelCase : List[str]=True , **_lowerCAmelCase : List[Any] , ):
__snake_case : Optional[int] = target_bandwidths
__snake_case : int = sampling_rate
__snake_case : List[Any] = audio_channels
__snake_case : str = normalize
__snake_case : Union[str, Any] = chunk_length_s
__snake_case : Union[str, Any] = overlap
__snake_case : Union[str, Any] = hidden_size
__snake_case : Union[str, Any] = num_filters
__snake_case : Optional[Any] = num_residual_layers
__snake_case : List[Any] = upsampling_ratios
__snake_case : List[str] = norm_type
__snake_case : Union[str, Any] = kernel_size
__snake_case : Optional[int] = last_kernel_size
__snake_case : Optional[Any] = residual_kernel_size
__snake_case : Dict = dilation_growth_rate
__snake_case : int = use_causal_conv
__snake_case : Tuple = pad_mode
__snake_case : str = compress
__snake_case : Optional[Any] = num_lstm_layers
__snake_case : List[Any] = trim_right_ratio
__snake_case : Any = codebook_size
__snake_case : int = codebook_dim if codebook_dim is not None else hidden_size
__snake_case : int = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
                f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}''' )
super().__init__(**_lowerCAmelCase )
@property
def snake_case__ ( self : int ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def snake_case__ ( self : int ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def snake_case__ ( self : Union[str, Any] ):
__snake_case : List[str] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def snake_case__ ( self : Tuple ):
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 20
| 0
|
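# Worked numbers for the derived EncodecConfig properties above, using the
# 24 kHz defaults: hop length = prod([8, 5, 4, 2]) = 320 samples per frame,
# frame_rate = ceil(24000 / 320) = 75 Hz, and the bandwidth-derived quantizer
# count = 1000 * 24.0 // (75 * 10) = 32. A standalone check of the arithmetic:
import math

upsampling_ratios = [8, 5, 4, 2]
sampling_rate = 24_000
target_bandwidths = [1.5, 3.0, 6.0, 12.0, 24.0]

hop_length = math.prod(upsampling_ratios)
frame_rate = math.ceil(sampling_rate / hop_length)
num_quantizers = int(1_000 * target_bandwidths[-1] // (frame_rate * 10))

assert (hop_length, frame_rate, num_quantizers) == (320, 75, 32)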
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : Optional[str] = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
A : Optional[str] = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
A : Optional[str] = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
A : Optional[str] = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
A : Optional[int] = field(default=2 , metadata={"help": "Batch size for training."} )
A : Optional[int] = field(default=2 , metadata={"help": "Batch size for evaluation."} )
A : Optional[float] = field(default=0.1 , metadata={"help": "Value of weight decay."} )
A : Optional[int] = field(
default=10000 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
A : Optional[float] = field(default=2e-4 , metadata={"help": "Learning rate fo training."} )
A : Optional[str] = field(default="cosine" , metadata={"help": "Learning rate."} )
A : Optional[int] = field(
default=750 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
A : Optional[int] = field(
default=16 , metadata={"help": "Number of gradient accumulation steps."} )
A : Optional[bool] = field(
default=__UpperCamelCase , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
A : Optional[int] = field(default=50000 , metadata={"help": "Maximum number of training steps."} )
A : Optional[int] = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
A : Optional[int] = field(default=1024 , metadata={"help": "Sequence lengths used for training."} )
A : Optional[int] = field(default=1 , metadata={"help": "Training seed."} )
A : Optional[int] = field(
default=1024 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
A : Optional[str] = field(
default=__UpperCamelCase , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
A : Optional[bool] = field(default=__UpperCamelCase , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : Optional[str] = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
A : Optional[str] = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
A : Optional[int] = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
A : Optional[int] = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
A : Optional[int] = field(default=1024 , metadata={"help": "Length of sequences to be evaluated."} )
A : Optional[int] = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : Optional[str] = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
A : Optional[int] = field(default=__UpperCamelCase , metadata={"help": "Number of workers used for code evaluation."} )
A : Optional[int] = field(
default=__UpperCamelCase , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
A : Optional[bool] = field(
default=__UpperCamelCase , metadata={"help": "Sample from the language model's output distribution."} )
A : Optional[float] = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
A : Optional[int] = field(default=256 , metadata={"help": "Maximum number of newly generated tokens."} )
A : Optional[int] = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
A : Optional[float] = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
A : Optional[int] = field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
A : Optional[int] = field(
default=200 , metadata={"help": "Number of completions to generate for each sample."} )
A : Optional[int] = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
A : Optional[str] = field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
A : Optional[str] = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
A : Optional[int] = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : Optional[int] = field(
default=__UpperCamelCase , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
A : Optional[str] = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
A : Optional[str] = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
A : Optional[int] = field(
default=100000 , metadata={"help": "Number of files to save per JSON output file."} )
A : Optional[str] = field(default="content" , metadata={"help": "Column containing text data to process."} )
A : Optional[float] = field(
default=1000 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
A : Optional[float] = field(
default=100 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
A : Optional[float] = field(
default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
A : Optional[float] = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
A : Optional[float] = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
A : Optional[str] = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
A : Optional[bool] = field(
default=__UpperCamelCase , metadata={"help": "If True, near-duplicate samples are removed."} )
A : Optional[float] = field(
default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : Optional[str] = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
A : Optional[str] = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
A : Optional[str] = field(default="content" , metadata={"help": "Column containing text data to process."} )
A : Optional[int] = field(default=200000 , metadata={"help": "Number of examples to train tokenizer on."} )
A : Optional[int] = field(
default=32768 , metadata={"help": "Number of examples to train the tokenizer on."} )
A : Optional[str] = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
A : Optional[bool] = field(default=__UpperCamelCase , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : Optional[str] = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
A : Optional[str] = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
A : Optional[str] = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
A : Optional[int] = field(default=__UpperCamelCase , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : Optional[str] = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
A : Optional[str] = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
A : Optional[str] = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
A : Optional[bool] = field(default=__UpperCamelCase , metadata={"help": "Push saved tokenizer to the hub."} )
| 368
|
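# The dataclasses above are meant to be fed to `HfArgumentParser`, which turns
# every field into a CLI flag and uses the metadata "help" text for --help. A
# minimal sketch of that flow with a tiny local dataclass (hedged: the real
# scripts parse the full argument classes defined above, not this toy one):
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser

@dataclass
class MiniTrainingArguments:
    model_ckpt: Optional[str] = field(default="codeparrot/codeparrot", metadata={"help": "Model name or path."})
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})

parser = HfArgumentParser(MiniTrainingArguments)
(args,) = parser.parse_args_into_dataclasses(args=["--train_batch_size", "8"])
assert args.train_batch_size == 8 and args.model_ckpt == "codeparrot/codeparrot"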
from __future__ import annotations
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__snake_case : str = []
__snake_case , __snake_case : List[str] = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
__snake_case : List[Any] = result + left + right
return input_list
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
if len(__SCREAMING_SNAKE_CASE ) <= 1:
return input_list
__snake_case : Union[str, Any] = list(__SCREAMING_SNAKE_CASE )
# iteration for two-way merging
__snake_case : Tuple = 2
while p <= len(__SCREAMING_SNAKE_CASE ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ):
__snake_case : List[str] = i
__snake_case : str = i + p - 1
__snake_case : Optional[Any] = (low + high + 1) // 2
__snake_case : str = merge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# final merge of last two parts
if p * 2 >= len(__SCREAMING_SNAKE_CASE ):
__snake_case : List[str] = i
__snake_case : str = merge(__SCREAMING_SNAKE_CASE , 0 , __SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
if user_input == "":
lowercase_ = []
else:
lowercase_ = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
| 20
| 0
|
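# A short trace of the bottom-up merge sort above on [5, 2, 4, 1]: the p = 2
# pass merges the pairs (5, 2) and (4, 1) into [2, 5, 1, 4]; the p = 4 pass
# merges the two sorted halves into [1, 2, 4, 5]. A standalone check using the
# same two-way merge step (hedged: `merge` is a plain restatement of the step
# above with the in-place slice assignment made explicit):
def merge(xs: list, low: int, mid: int, high: int) -> list:
    result, left, right = [], xs[low:mid], xs[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    xs[low : high + 1] = result + left + right
    return xs

data = [5, 2, 4, 1]
merge(data, 0, 1, 1)
merge(data, 2, 3, 3)
assert data == [2, 5, 1, 4]  # after the p = 2 pass
merge(data, 0, 2, 3)
assert data == [1, 2, 4, 5]  # after the final merge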
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowercase_ = logging.get_logger(__name__)
# General docstring
lowercase_ = "RegNetConfig"
# Base docstring
lowercase_ = "facebook/regnet-y-040"
lowercase_ = [1, 10_88, 7, 7]
# Image classification docstring
lowercase_ = "facebook/regnet-y-040"
lowercase_ = "tabby, tabby cat"
lowercase_ = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : Optional[str] = "relu" , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(**_lowerCAmelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__snake_case : Dict = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__snake_case : Optional[Any] = tf.keras.layers.ConvaD(
filters=_lowerCAmelCase , kernel_size=_lowerCAmelCase , strides=_lowerCAmelCase , padding="""VALID""" , groups=_lowerCAmelCase , use_bias=_lowerCAmelCase , name="""convolution""" , )
__snake_case : str = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="""normalization""" )
__snake_case : int = ACTaFN[activation] if activation is not None else tf.identity
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Dict ):
__snake_case : List[Any] = self.convolution(self.padding(_lowerCAmelCase ) )
__snake_case : List[str] = self.normalization(_lowerCAmelCase )
__snake_case : Optional[int] = self.activation(_lowerCAmelCase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self : str , _lowerCAmelCase : RegNetConfig , **_lowerCAmelCase : List[str] ):
super().__init__(**_lowerCAmelCase )
__snake_case : Tuple = config.num_channels
__snake_case : Any = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
def snake_case__ ( self : List[Any] , _lowerCAmelCase : Optional[int] ):
__snake_case : List[str] = shape_list(_lowerCAmelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__snake_case : Tuple = tf.transpose(_lowerCAmelCase , perm=(0, 2, 3, 1) )
__snake_case : Union[str, Any] = self.embedder(_lowerCAmelCase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : int = 2 , **_lowerCAmelCase : Dict ):
super().__init__(**_lowerCAmelCase )
__snake_case : int = tf.keras.layers.ConvaD(
filters=_lowerCAmelCase , kernel_size=1 , strides=_lowerCAmelCase , use_bias=_lowerCAmelCase , name="""convolution""" )
__snake_case : Any = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="""normalization""" )
def snake_case__ ( self : List[str] , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : bool = False ):
return self.normalization(self.convolution(_lowerCAmelCase ) , training=_lowerCAmelCase )
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : int , **_lowerCAmelCase : Optional[int] ):
super().__init__(**_lowerCAmelCase )
__snake_case : str = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowerCAmelCase , name="""pooler""" )
__snake_case : int = [
tf.keras.layers.ConvaD(filters=_lowerCAmelCase , kernel_size=1 , activation="""relu""" , name="""attention.0""" ),
tf.keras.layers.ConvaD(filters=_lowerCAmelCase , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ),
]
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Optional[int] ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__snake_case : Optional[int] = self.pooler(_lowerCAmelCase )
for layer_module in self.attention:
__snake_case : int = layer_module(_lowerCAmelCase )
__snake_case : List[Any] = hidden_state * pooled
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self : str , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 1 , **_lowerCAmelCase : Dict ):
super().__init__(**_lowerCAmelCase )
__snake_case : str = in_channels != out_channels or stride != 1
__snake_case : int = max(1 , out_channels // config.groups_width )
__snake_case : Dict = (
TFRegNetShortCut(_lowerCAmelCase , stride=_lowerCAmelCase , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__snake_case : Any = [
TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
_lowerCAmelCase , stride=_lowerCAmelCase , groups=_lowerCAmelCase , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=_lowerCAmelCase , name="""layer.2""" ),
]
__snake_case : Optional[int] = ACTaFN[config.hidden_act]
def snake_case__ ( self : Tuple , _lowerCAmelCase : Union[str, Any] ):
__snake_case : Optional[Any] = hidden_state
for layer_module in self.layers:
__snake_case : List[str] = layer_module(_lowerCAmelCase )
__snake_case : Union[str, Any] = self.shortcut(_lowerCAmelCase )
hidden_state += residual
__snake_case : Any = self.activation(_lowerCAmelCase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 1 , **_lowerCAmelCase : Optional[int] ):
super().__init__(**_lowerCAmelCase )
__snake_case : Optional[Any] = in_channels != out_channels or stride != 1
__snake_case : Dict = max(1 , out_channels // config.groups_width )
__snake_case : Optional[Any] = (
TFRegNetShortCut(_lowerCAmelCase , stride=_lowerCAmelCase , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
__snake_case : int = [
TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
_lowerCAmelCase , stride=_lowerCAmelCase , groups=_lowerCAmelCase , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetSELayer(_lowerCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ),
TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=_lowerCAmelCase , name="""layer.3""" ),
]
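# The Y block is the X block with a squeeze-and-excitation layer inserted before the final 1x1 convolution.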
__snake_case : Tuple = ACTaFN[config.hidden_act]
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : List[Any] ):
__snake_case : int = hidden_state
for layer_module in self.layers:
__snake_case : List[str] = layer_module(_lowerCAmelCase )
__snake_case : Union[str, Any] = self.shortcut(_lowerCAmelCase )
hidden_state += residual
__snake_case : int = self.activation(_lowerCAmelCase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self : str , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , **_lowerCAmelCase : List[Any] ):
super().__init__(**_lowerCAmelCase )
__snake_case : Tuple = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
__snake_case : Optional[Any] = [
# downsampling is done in the first layer with stride of 2
layer(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , stride=_lowerCAmelCase , name="""layers.0""" ),
*[layer(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def snake_case__ ( self : List[Any] , _lowerCAmelCase : Dict ):
for layer_module in self.layers:
__snake_case : int = layer_module(_lowerCAmelCase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self : Tuple , _lowerCAmelCase : RegNetConfig , **_lowerCAmelCase : int ):
super().__init__(**_lowerCAmelCase )
__snake_case : Union[str, Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) )
__snake_case : Optional[int] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_lowerCAmelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , depth=_lowerCAmelCase , name=f'''stages.{i+1}''' ) )
def snake_case__ ( self : Any , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = True ):
__snake_case : Union[str, Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__snake_case : Optional[Any] = hidden_states + (hidden_state,)
__snake_case : Optional[Any] = stage_module(_lowerCAmelCase )
if output_hidden_states:
__snake_case : int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_lowerCAmelCase , hidden_states=_lowerCAmelCase )
@keras_serializable
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
A : List[Any] = RegNetConfig
def __init__( self : Tuple , _lowerCAmelCase : List[Any] , **_lowerCAmelCase : str ):
super().__init__(**_lowerCAmelCase )
__snake_case : List[str] = config
__snake_case : List[str] = TFRegNetEmbeddings(_lowerCAmelCase , name="""embedder""" )
__snake_case : Any = TFRegNetEncoder(_lowerCAmelCase , name="""encoder""" )
__snake_case : Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowerCAmelCase , name="""pooler""" )
@unpack_inputs
def snake_case__ ( self : Dict , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : bool = False , ):
__snake_case : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case : Tuple = self.embedder(_lowerCAmelCase , training=_lowerCAmelCase )
__snake_case : str = self.encoder(
_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase )
__snake_case : str = encoder_outputs[0]
__snake_case : List[Any] = self.pooler(_lowerCAmelCase )
# Change to the NCHW output format to have uniformity in the modules
__snake_case : Dict = tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) )
__snake_case : int = tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__snake_case : Tuple = tuple([tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowerCAmelCase , pooler_output=_lowerCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Union[str, Any] = RegNetConfig
A : str = "regnet"
A : Any = "pixel_values"
@property
def snake_case__ ( self : Optional[int] ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
lowercase_ = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
lowercase_ = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , __UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self : List[Any] , _lowerCAmelCase : RegNetConfig , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : List[str] ):
super().__init__(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
__snake_case : Optional[int] = TFRegNetMainLayer(_lowerCAmelCase , name="""regnet""" )
@unpack_inputs
@add_start_docstrings_to_model_forward(_lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case__ ( self : str , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : List[Any]=False , ):
__snake_case : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case : List[Any] = self.regnet(
pixel_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , __UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase ):
def __init__( self : str , _lowerCAmelCase : RegNetConfig , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Optional[Any] ):
super().__init__(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
__snake_case : Union[str, Any] = config.num_labels
__snake_case : Optional[Any] = TFRegNetMainLayer(_lowerCAmelCase , name="""regnet""" )
# classification head
__snake_case : List[Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
]
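# Classification head: flatten the pooled [batch, 1, 1, C] features, then a single dense projection
# to `num_labels` (replaced by the identity when there are no labels).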
@unpack_inputs
@add_start_docstrings_to_model_forward(_lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case__ ( self : List[Any] , _lowerCAmelCase : tf.Tensor = None , _lowerCAmelCase : tf.Tensor = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Dict=False , ):
__snake_case : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case : int = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case : List[str] = self.regnet(
_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase )
__snake_case : Dict = outputs.pooler_output if return_dict else outputs[1]
__snake_case : Dict = self.classifier[0](_lowerCAmelCase )
__snake_case : Any = self.classifier[1](_lowerCAmelCase )
__snake_case : Optional[int] = None if labels is None else self.hf_compute_loss(labels=_lowerCAmelCase , logits=_lowerCAmelCase )
if not return_dict:
__snake_case : Tuple = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_lowerCAmelCase , logits=_lowerCAmelCase , hidden_states=outputs.hidden_states )
| 369
|
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowercase_ = "Usage of script: script_name <size_of_canvas:int>"
lowercase_ = [0] * 1_00 + [1] * 10
random.shuffle(choice)
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__snake_case : List[str] = [[False for i in range(__SCREAMING_SNAKE_CASE )] for j in range(__SCREAMING_SNAKE_CASE )]
return canvas
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list[list[bool]] ):
'''simple docstring'''
for i, row in enumerate(__SCREAMING_SNAKE_CASE ):
for j, _ in enumerate(__SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE[i][j] = bool(random.getrandbits(1 ) )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list[list[bool]] ):
'''simple docstring'''
__snake_case : Union[str, Any] = np.array(__SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__SCREAMING_SNAKE_CASE ):
for c, pt in enumerate(__SCREAMING_SNAKE_CASE ):
next_gen_canvas[r][c] = __judge_point(
pt , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
__snake_case : List[str] = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
__snake_case : list[list[bool]] = current_canvas.tolist()
return return_canvas
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : list[list[bool]] ):
'''simple docstring'''
__snake_case : Any = 0
__snake_case : Dict = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
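# Conway's rules: a live cell survives with exactly 2 or 3 live neighbours (under- or over-population
# kills it); a dead cell comes to life with exactly 3 live neighbours.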
__snake_case : str = pt
if pt:
if alive < 2:
__snake_case : Optional[Any] = False
elif alive == 2 or alive == 3:
__snake_case : Union[str, Any] = True
elif alive > 3:
__snake_case : Optional[int] = False
else:
if alive == 3:
__snake_case : List[Any] = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowercase_ = int(sys.argv[1])
# main working structure of this module.
lowercase_ = create_canvas(canvas_size)
seed(c)
lowercase_ , lowercase_ = plt.subplots()
fig.show()
lowercase_ = ListedColormap(["w", "k"])
try:
while True:
lowercase_ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 20
| 0
|
from __future__ import annotations
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__snake_case : str = []
__snake_case , __snake_case : List[str] = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
input_list[low : high + 1] = result + left + right
return input_list
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
if len(__SCREAMING_SNAKE_CASE ) <= 1:
return input_list
__snake_case : Union[str, Any] = list(__SCREAMING_SNAKE_CASE )
# iteration for two-way merging
__snake_case : Tuple = 2
while p <= len(__SCREAMING_SNAKE_CASE ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ):
__snake_case : List[str] = i
__snake_case : str = i + p - 1
__snake_case : Optional[Any] = (low + high + 1) // 2
__snake_case : str = merge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# final merge of last two parts
if p * 2 >= len(__SCREAMING_SNAKE_CASE ):
__snake_case : List[str] = i
__snake_case : str = merge(__SCREAMING_SNAKE_CASE , 0 , __SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) - 1 )
break
p *= 2
return input_list
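# For example, iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) doubles the run length p (2, 4, 8, ...) and
# merges adjacent runs bottom-up, yielding [1, 2, 5, 7, 7, 8, 9].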
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
if user_input == "":
lowercase_ = []
else:
lowercase_ = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
| 370
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def __lowerCAmelCase ( *__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Union[Dict, Any]] = None , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : int=2 ):
'''simple docstring'''
from .. import __version__
__snake_case : List[Any] = take_from
__snake_case : List[Any] = ()
if not isinstance(args[0] , __SCREAMING_SNAKE_CASE ):
__snake_case : str = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__SCREAMING_SNAKE_CASE ).base_version ) >= version.parse(__SCREAMING_SNAKE_CASE ):
raise ValueError(
F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
F''' version {__version__} is >= {version_name}''' )
__snake_case : Optional[Any] = None
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__SCREAMING_SNAKE_CASE ),)
__snake_case : Optional[Any] = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
values += (getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ),)
__snake_case : Any = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
__snake_case : Tuple = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
__snake_case : Optional[Any] = warning + """ """ if standard_warn else """"""
warnings.warn(warning + message , __SCREAMING_SNAKE_CASE , stacklevel=__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) > 0:
__snake_case : Dict = inspect.getouterframes(inspect.currentframe() )[1]
__snake_case : int = call_frame.filename
__snake_case : int = call_frame.lineno
__snake_case : List[str] = call_frame.function
__snake_case , __snake_case : List[Any] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(__SCREAMING_SNAKE_CASE ) == 0:
return
elif len(__SCREAMING_SNAKE_CASE ) == 1:
return values[0]
return values
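# Typical call (illustrative; upstream diffusers names this utility `deprecate`):
# deprecate(("old_arg", "0.20.0", "use `new_arg` instead"), take_from=kwargs)
# pops `old_arg` from kwargs when present, emits a deprecation warning, and returns the popped value.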
| 20
| 0
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def snake_case__ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case__ ( self : int ):
__snake_case , __snake_case : str = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
__snake_case : Union[str, Any] = """A painting of a squirrel eating a burger"""
__snake_case : Dict = jax.device_count()
__snake_case : int = num_samples * [prompt]
__snake_case : List[str] = sd_pipe.prepare_inputs(_lowerCAmelCase )
__snake_case : Union[str, Any] = replicate(_lowerCAmelCase )
__snake_case : Optional[Any] = shard(_lowerCAmelCase )
__snake_case : List[str] = jax.random.PRNGKey(0 )
__snake_case : int = jax.random.split(_lowerCAmelCase , jax.device_count() )
__snake_case : Optional[Any] = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
__snake_case : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__snake_case : Optional[int] = images[0, 2_53:2_56, 2_53:2_56, -1]
__snake_case : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__snake_case : Optional[Any] = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def snake_case__ ( self : List[str] ):
__snake_case : Tuple = """stabilityai/stable-diffusion-2"""
__snake_case , __snake_case : Optional[int] = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
__snake_case , __snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
_lowerCAmelCase , scheduler=_lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
params["""scheduler"""] = scheduler_params
__snake_case : List[str] = """A painting of a squirrel eating a burger"""
__snake_case : Dict = jax.device_count()
__snake_case : Union[str, Any] = num_samples * [prompt]
__snake_case : Any = sd_pipe.prepare_inputs(_lowerCAmelCase )
__snake_case : List[str] = replicate(_lowerCAmelCase )
__snake_case : Dict = shard(_lowerCAmelCase )
__snake_case : Optional[int] = jax.random.PRNGKey(0 )
__snake_case : List[str] = jax.random.split(_lowerCAmelCase , jax.device_count() )
__snake_case : Tuple = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
__snake_case : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__snake_case : Optional[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
__snake_case : Any = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__snake_case : Dict = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 371
|
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowercase_ = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=True ):
'''simple docstring'''
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
__snake_case , __snake_case , __snake_case , __snake_case : Any = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__snake_case : int = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
__snake_case : Dict = config_class.from_json_file(__SCREAMING_SNAKE_CASE )
config.output_hidden_states = True
config.output_attentions = True
print(F'''Building TensorFlow model from configuration: {config}''' )
__snake_case : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
# Resolve the PyTorch checkpoint (download it from AWS if given as a shortcut name)
if pytorch_checkpoint_path in aws_config_map.keys():
__snake_case : Optional[Any] = cached_file(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__snake_case : List[Any] = load_pytorch_checkpoint_in_tfa_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if compare_with_pt_model:
__snake_case : Tuple = tf_model(tf_model.dummy_inputs , training=__SCREAMING_SNAKE_CASE ) # build the network
__snake_case : List[str] = torch.load(__SCREAMING_SNAKE_CASE , map_location="""cpu""" )
__snake_case : Any = pt_model_class.from_pretrained(
pretrained_model_name_or_path=__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE , state_dict=__SCREAMING_SNAKE_CASE )
with torch.no_grad():
__snake_case : Union[str, Any] = pt_model(**pt_model.dummy_inputs )
__snake_case : Any = pto[0].numpy()
__snake_case : Optional[int] = tfo[0].numpy()
__snake_case : Optional[int] = np.amax(np.abs(np_pt - np_tf ) )
print(F'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2E-2, F'''Error, model absolute difference is >2e-2: {diff}'''
# Save the TensorFlow model
print(F'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(__SCREAMING_SNAKE_CASE , save_format="""h5""" )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Any=False , ):
'''simple docstring'''
if args_model_type is None:
__snake_case : Tuple = list(MODEL_CLASSES.keys() )
else:
__snake_case : Union[str, Any] = [args_model_type]
for j, model_type in enumerate(__SCREAMING_SNAKE_CASE , start=1 ):
print("""=""" * 1_0_0 )
print(F''' Converting model type {j}/{len(__SCREAMING_SNAKE_CASE )}: {model_type}''' )
print("""=""" * 1_0_0 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[int] = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__snake_case : int = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__snake_case : Union[str, Any] = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , start=1 ):
print("""-""" * 1_0_0 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
__snake_case : List[Any] = model_shortcut_name
elif only_convert_finetuned_models:
print(F''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
F''' Converting checkpoint {i}/{len(__SCREAMING_SNAKE_CASE )}: {model_shortcut_name} - model_type {model_type}''' )
print("""-""" * 1_0_0 )
if config_shortcut_name in aws_config_map:
__snake_case : int = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
__snake_case : Dict = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__snake_case : Union[str, Any] = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
__snake_case : List[Any] = model_shortcut_name
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
__snake_case : List[str] = """converted_model"""
convert_pt_checkpoint_to_tf(
model_type=__SCREAMING_SNAKE_CASE , pytorch_checkpoint_path=__SCREAMING_SNAKE_CASE , config_file=__SCREAMING_SNAKE_CASE , tf_dump_path=os.path.join(__SCREAMING_SNAKE_CASE , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=__SCREAMING_SNAKE_CASE , )
if remove_cached_files:
os.remove(__SCREAMING_SNAKE_CASE )
os.remove(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
lowercase_ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 20
| 0
|
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __magic_name__ ( unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str ):
lowercase_ : Union[str, Any] = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ , config_name=lowercase_ )
lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , lowercase_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : int = AutoConfig.from_pretrained("""gpt2""" )
lowercase_ : List[Any] = GenerationConfig.from_model_config(lowercase_ )
lowercase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(lowercase_ , lowercase_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Optional[int] = GenerationConfig()
lowercase_ : int = {
"""max_new_tokens""": 1024,
"""foo""": """bar""",
}
lowercase_ : List[str] = copy.deepcopy(lowercase_ )
lowercase_ : Tuple = generation_config.update(**lowercase_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(lowercase_ , lowercase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(lowercase_ , {"""foo""": """bar"""} )
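# Net effect: a single update() call both mutates the config in place and surfaces unknown keys to the caller.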
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Dict = GenerationConfig()
lowercase_ : int = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(lowercase_ )
lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
lowercase_ : List[str] = GenerationConfig.from_model_config(lowercase_ )
assert not hasattr(lowercase_ , """foo""" ) # no new kwargs should be initialized if from config
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Optional[int] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , lowercase_ )
self.assertEqual(default_config.num_beams , 1 )
lowercase_ : Dict = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , lowercase_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ )
lowercase_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , lowercase_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __magic_name__ ( unittest.TestCase):
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any ):
lowercase_ : int = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Tuple = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
lowercase_ : List[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id="""test-generation-config""" , push_to_hub=lowercase_ , use_auth_token=self._token )
lowercase_ : int = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
lowercase_ : Optional[Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowercase_ , use_auth_token=self._token )
lowercase_ : int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
| 21
|
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] ) -> List[Any]:
# Initialise PyTorch model
lowercase_ : List[str] = FunnelConfig.from_json_file(UpperCAmelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
lowercase_ : Dict = FunnelBaseModel(UpperCAmelCase__ ) if base_model else FunnelModel(UpperCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , UpperCAmelCase__ )
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
_lowercase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 21
| 1
|
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 21
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_lowercase : Optional[List[str]] = None
_lowercase : str = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_lowercase : Optional[int] = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class __magic_name__ :
UpperCamelCase__ = True
UpperCamelCase__ = None
# Automatically constructed
UpperCamelCase__ = "PIL.Image.Image"
UpperCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()})
UpperCamelCase__ = field(default='''Image''', init=_UpperCAmelCase, repr=_UpperCAmelCase)
def __call__( self : Tuple ):
return self.pa_type
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(lowercase_ , lowercase_ ):
lowercase_ : int = np.array(lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
return {"path": value, "bytes": None}
elif isinstance(lowercase_ , lowercase_ ):
return {"path": None, "bytes": value}
elif isinstance(lowercase_ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(lowercase_ )
elif isinstance(lowercase_ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(lowercase_ )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : dict , lowercase_ : List[str]=None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
lowercase_ : Union[str, Any] = {}
lowercase_ , lowercase_ : List[Any] = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(lowercase_ ):
lowercase_ : int = PIL.Image.open(lowercase_ )
else:
lowercase_ : str = path.split("""::""" )[-1]
try:
lowercase_ : Any = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""]
lowercase_ : Optional[Any] = token_per_repo_id.get(lowercase_ )
except ValueError:
lowercase_ : str = None
with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f:
lowercase_ : Dict = BytesIO(f.read() )
lowercase_ : Optional[Any] = PIL.Image.open(bytes_ )
else:
lowercase_ : Any = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def SCREAMING_SNAKE_CASE_ ( self : int ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
lowercase_ : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowercase_ : Any = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
lowercase_ : Optional[int] = storage.field("""bytes""" )
else:
lowercase_ : Optional[Any] = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
lowercase_ : Dict = storage.field("""path""" )
else:
lowercase_ : int = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
lowercase_ : Optional[int] = pa.array(
[encode_np_array(np.array(lowercase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
lowercase_ : Tuple = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowercase_ : Tuple = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase_ , self.pa_type )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(lowercase_ : Optional[Any] ):
with xopen(lowercase_ , """rb""" ) as f:
lowercase_ : int = f.read()
return bytes_
lowercase_ : Optional[Any] = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowercase_ : Any = pa.array(
[os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase_ , self.pa_type )
def lowerCamelCase ( ) -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
lowercase_ : int = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> bytes:
lowercase_ : Tuple = BytesIO()
if image.format in list_image_compression_formats():
lowercase_ : int = image.format
else:
lowercase_ : int = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(UpperCAmelCase__ , format=UpperCAmelCase__ )
return buffer.getvalue()
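# Example: a freshly created PIL.Image.new("RGB", (2, 2)) has no `format`, so it is serialized as PNG.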
def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> dict:
if hasattr(UpperCAmelCase__ , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )}
def lowerCamelCase ( UpperCAmelCase__ : np.ndarray ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
lowercase_ : List[Any] = array.dtype
lowercase_ : int = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
lowercase_ : Dict = dtype.kind
lowercase_ : List[Any] = dtype.itemsize
lowercase_ : Any = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
lowercase_ : int = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
lowercase_ : str = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
lowercase_ : str = dtype_byteorder + dtype_kind + str(UpperCAmelCase__ )
lowercase_ : Optional[Any] = np.dtype(UpperCAmelCase__ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
lowercase_ : Optional[int] = PIL.Image.fromarray(array.astype(UpperCAmelCase__ ) )
return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )}
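# Example: an int64 array has no exact Pillow-compatible dtype, so it is downcast within its kind to
# int32 (with a warning) before being converted to an image.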
def objects_to_list_of_image_dicts(objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
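# The dtype-downcasting loop in `encode_np_array` is easiest to see in isolation. The
# sketch below reproduces the same idea outside the library: starting from the array's
# itemsize, halve it until the dtype (same kind, native byte order) is one Pillow accepts.
# `valid_dtypes` is an illustrative stand-in for the module's real constant, and a
# little-endian host is assumed; this helper is an addition, not part of the module.
def _downcast_for_pillow_demo(array: np.ndarray) -> np.dtype:
    valid_dtypes = [np.dtype("|u1"), np.dtype("<u2"), np.dtype("<i4"), np.dtype("<f4")]
    byteorder = array.dtype.byteorder if array.dtype.byteorder != "=" else "<"
    kind, itemsize = array.dtype.kind, array.dtype.itemsize
    while itemsize >= 1:
        candidate = np.dtype(byteorder + kind + str(itemsize))
        if candidate in valid_dtypes:
            return candidate  # first compatible dtype within the same kind
        itemsize //= 2
    raise TypeError(f"no valid image dtype for {array.dtype}")
# e.g. _downcast_for_pillow_demo(np.arange(12, dtype="int64").reshape(3, 4)) -> dtype('int32')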
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def lowerCamelCase ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = train_dataset.features["label"].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = eval_dataset.features["label"].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = predict_dataset.features["label"].names
    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True, )
if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", )
if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", )
# Get the metric function
    metric = evaluate.load("xnli")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
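# A minimal, self-contained illustration of what `compute_metrics` above receives and
# returns. The `EvalPrediction`-style object is faked with a namedtuple and the XNLI
# metric is replaced by plain accuracy so the sketch runs offline; the real script uses
# `evaluate.load("xnli")`. This helper is an illustrative addition, not part of the example.
def _compute_metrics_demo():
    from collections import namedtuple
    FakeEvalPrediction = namedtuple("FakeEvalPrediction", ["predictions", "label_ids"])
    logits = np.array([[2.0, 0.1, 0.3], [0.2, 1.5, 0.1]])  # shape (batch_size, num_labels)
    p = FakeEvalPrediction(predictions=logits, label_ids=np.array([0, 2]))
    preds = np.argmax(p.predictions, axis=1)  # argmax over the label axis -> [0, 1]
    return {"accuracy": float((preds == p.label_ids).mean())}  # -> 0.5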
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True, ) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
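# A quick worked example of `get_distance`: the origin (0, 0) never diverges, so the loop
# runs all `max_step` iterations and the normalized distance is exactly 1, which the
# coloring functions map to black. A point like (1, 1) escapes immediately and maps to a
# bright hue instead. This check is an illustrative addition, not part of the original file.
def _mandelbrot_distance_demo() -> None:
    assert get_distance(0, 0, 50) == 1.0  # inside the set -> colored (0, 0, 0)
    assert get_distance(1, 1, 50) < 0.1  # escapes fast -> bright hue via hsv_to_rgb
    print(get_color_coded_rgb(get_distance(1, 1, 50)))  # -> (255, 0, 0)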
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
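# A small sketch of what the two helpers above produce for a pair of sequences. The token
# ids are made up for illustration (real ids come from the vocabulary); with cls=101 and
# sep=102 the layout is [CLS] A [SEP] B [SEP], and the token type ids mark the first
# segment with 0s and the second with 1s. Illustrative addition, not part of the module.
def _special_tokens_demo():
    cls_id, sep_id = 101, 102  # hypothetical ids
    token_ids_0, token_ids_1 = [7, 8], [9]
    pair = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
    type_ids = len([cls_id] + token_ids_0 + [sep_id]) * [0] + len(token_ids_1 + [sep_id]) * [1]
    assert pair == [101, 7, 8, 102, 9, 102]
    assert type_ids == [0, 0, 0, 0, 1, 1]
    return pair, type_ids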
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
'''simple docstring'''
class EditDistance:
    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []
    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]
    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)
    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
    solver = EditDistance()
print("****************** Testing Edit Distance DP Algorithm ******************")
print()
    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()
    print()
    print(f"""The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}""")
    print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}""")
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_lowercase : Union[str, Any] = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple: tuple, flax_tensor) -> tuple:
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path)
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto")
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
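# The sharding logic above boils down to a simple accounting rule: keep a running byte
# count and start a new shard whenever the next tensor would push it past the limit. The
# sketch below reproduces that rule on plain (name, num_bytes) pairs; it is an
# illustration of the algorithm, not a drop-in replacement for `shard_on_the_fly`.
def _greedy_shard_demo(weights, max_shard_size_bytes):
    shards, current, current_size = [], [], 0
    for name, num_bytes in weights:
        if current and current_size + num_bytes > max_shard_size_bytes:
            shards.append(current)  # close the current shard before it overflows
            current, current_size = [], 0
        current.append(name)
        current_size += num_bytes
    if current:
        shards.append(current)
    return shards
# e.g. three 6-byte tensors with a 10-byte budget end up in three separate shards:
# _greedy_shard_demo([("a", 6), ("b", 6), ("c", 6)], 10) -> [["a"], ["b"], ["c"]]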
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs, )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
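# A hedged usage sketch for the processor above: everything below is an illustrative
# addition, not part of the module. The checkpoint name "facebook/flava-full" is an
# assumption about what is available on the Hub, and `FlavaProcessor` is assumed to be
# the exported name of this class.
def _flava_processor_demo():
    from PIL import Image
    from transformers import FlavaProcessor
    processor = FlavaProcessor.from_pretrained("facebook/flava-full")  # assumed checkpoint
    image = Image.new("RGB", (224, 224))
    inputs = processor(images=image, text="a blank square", return_tensors="pt")
    # the returned encoding merges tokenizer fields (input_ids, attention_mask)
    # with image-processor fields (pixel_values)
    return inputs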
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
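# The byte accounting above relies on ONNX TensorProto data-type codes: 1 is FLOAT (4
# bytes), 6 is INT32 (4 bytes), 7 is INT64 and 11 is DOUBLE (8 bytes each). A compact
# lookup-table version of the same computation is sketched below for illustration; it is
# an addition and is not used by the functions above.
def _initializer_num_bytes_demo(dims, data_type):
    bytes_per_elem = {1: 4, 6: 4, 7: 8, 11: 8}  # TensorProto: FLOAT, INT32, INT64, DOUBLE
    if data_type not in bytes_per_elem:
        raise ValueError(f"unexpected data type: {data_type}")
    return int(numpy.prod(dims)) * bytes_per_elem[data_type]
# e.g. a float32 initializer of shape (768, 768) costs 768 * 768 * 4 = 2359296 bytes:
# _initializer_num_bytes_demo((768, 768), 1) -> 2359296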
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]
    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
    em = 100.0 * em / total
    f1 = 100.0 * f1 / total
    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]
    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k
    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True, )["input_ids"].to(args.device)
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True)
        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)
        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))
        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)
    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue
        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))
        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)
        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()
            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
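# A tiny illustration of `metric_max_over_ground_truths`: the prediction is scored against
# every reference answer and the best score wins, so a prediction matching any one of the
# gold answers gets full exact-match credit. The toy metric below stands in for the real
# `exact_match_score` from utils_rag; this helper is illustrative only.
def _metric_max_demo():
    exact = lambda pred, gt: float(pred.strip().lower() == gt.strip().lower())
    ground_truths = ["Paris", "paris, france"]
    assert metric_max_over_ground_truths(exact, "PARIS", ground_truths) == 1.0
    assert metric_max_over_ground_truths(exact, "london", ground_truths) == 0.0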
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation.")
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Dict , lowercase_ : Optional[Any]=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , lowercase_ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
lowercase_ : Any = None
if generate_kwargs is None:
lowercase_ : Optional[Any] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowercase_ : Dict = model_inputs.pop(self.model.main_input_name )
lowercase_ : Any = self.model.generate(lowercase_ , **lowercase_ , **lowercase_ )
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[Any] ):
lowercase_ : List[str] = []
for output_ids in model_outputs:
lowercase_ : Union[str, Any] = {
"""generated_text""": self.tokenizer.decode(
lowercase_ , skip_special_tokens=lowercase_ , )
}
records.append(lowercase_ )
return records
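# Minimal usage sketch for the pipeline defined above. The checkpoint name and
# image path are illustrative assumptions, not part of this module.
if __name__ == "__main__":
    from transformers import pipeline

    captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
    # Returns a list of dicts like [{"generated_text": "..."}]
    print(captioner("path/to/image.png", max_new_tokens=20))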
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_lowercase : Optional[int] = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_lowercase : List[Any] = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_lowercase : List[Any] = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : int=None , lowercase_ : Tuple="uniform_average" , lowercase_ : Dict=True ):
lowercase_ : Optional[int] = mean_squared_error(
lowercase_ , lowercase_ , sample_weight=lowercase_ , multioutput=lowercase_ , squared=lowercase_ )
return {"mse": mse}
'''simple docstring'''
class __magic_name__ :
def __init__( self : int , lowercase_ : list ):
lowercase_ : Dict = set_counts
lowercase_ : List[Any] = max(lowercase_ )
lowercase_ : str = len(lowercase_ )
lowercase_ : str = [1] * num_sets
lowercase_ : Dict = list(range(lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : List[Any] = self.get_parent(lowercase_ )
lowercase_ : Union[str, Any] = self.get_parent(lowercase_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowercase_ : List[str] = 0
lowercase_ : Optional[int] = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowercase_ : int = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowercase_ : int = 0
lowercase_ : List[Any] = src_parent
lowercase_ : List[Any] = self.set_counts[src_parent]
lowercase_ : Tuple = max(self.max_set , lowercase_ )
return True
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : int ):
if self.parents[disj_set] == disj_set:
return disj_set
lowercase_ : int = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
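# Semantics of the structure above: a disjoint-set (union-find) with
# union-by-rank, path compression, and per-set size tracking. The constructor
# takes the initial size of each set, the two-argument method merges the sets
# containing its arguments (returning False when they already share a root),
# and the one-argument method finds a set's representative. Intended usage,
# assuming the unmangled names `DisjointSet`, `merge`, and `get_parent`
# (assumptions for illustration only; the identifiers above are mangled):
#
#     ds = DisjointSet([1, 1, 1])            # three singleton sets of size 1
#     ds.merge(0, 1)                         # union the sets holding 0 and 1
#     ds.get_parent(0) == ds.get_parent(1)   # -> True
#     ds.max_set                             # -> 2, largest set size so far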
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : Any = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''efficientformer'''
def __init__( self : Optional[int] , lowercase_ : List[int] = [3, 2, 6, 4] , lowercase_ : List[int] = [48, 96, 224, 448] , lowercase_ : List[bool] = [True, True, True, True] , lowercase_ : int = 448 , lowercase_ : int = 32 , lowercase_ : int = 4 , lowercase_ : int = 7 , lowercase_ : int = 5 , lowercase_ : int = 8 , lowercase_ : int = 4 , lowercase_ : float = 0.0 , lowercase_ : int = 16 , lowercase_ : int = 3 , lowercase_ : int = 3 , lowercase_ : int = 3 , lowercase_ : int = 2 , lowercase_ : int = 1 , lowercase_ : float = 0.0 , lowercase_ : int = 1 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : float = 1E-5 , lowercase_ : str = "gelu" , lowercase_ : float = 0.02 , lowercase_ : float = 1E-12 , lowercase_ : int = 224 , lowercase_ : float = 1E-05 , **lowercase_ : Tuple , ):
super().__init__(**lowercase_ )
lowercase_ : int = hidden_act
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : str = hidden_sizes
lowercase_ : int = num_hidden_layers
lowercase_ : List[Any] = num_attention_heads
lowercase_ : Any = initializer_range
lowercase_ : List[Any] = layer_norm_eps
lowercase_ : int = patch_size
lowercase_ : str = num_channels
lowercase_ : Optional[int] = depths
lowercase_ : Union[str, Any] = mlp_expansion_ratio
lowercase_ : Any = downsamples
lowercase_ : Union[str, Any] = dim
lowercase_ : List[str] = key_dim
lowercase_ : Optional[int] = attention_ratio
lowercase_ : Optional[int] = resolution
lowercase_ : Tuple = pool_size
lowercase_ : Optional[Any] = downsample_patch_size
lowercase_ : str = downsample_stride
lowercase_ : Any = downsample_pad
lowercase_ : List[Any] = drop_path_rate
        lowercase_ : Union[str, Any] = num_meta3d_blocks
lowercase_ : Tuple = distillation
lowercase_ : Dict = use_layer_scale
lowercase_ : List[str] = layer_scale_init_value
lowercase_ : Optional[Any] = image_size
lowercase_ : List[str] = batch_norm_eps
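# Instantiation sketch for the configuration above, via the classes that
# transformers ships for it (illustrative; the values shown repeat the
# defaults from the signature):
if __name__ == "__main__":
    from transformers import EfficientFormerConfig, EfficientFormerModel

    config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
    model = EfficientFormerModel(config)
    print(config.hidden_sizes)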
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_lowercase : str = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : str , *lowercase_ : int , **lowercase_ : Any ):
super().__init__(*lowercase_ , **lowercase_ )
requires_backends(self , """decord""" )
self.check_model_type(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : Union[str, Any]=None , lowercase_ : List[Any]=None ):
lowercase_ : Union[str, Any] = {}
if frame_sampling_rate is not None:
lowercase_ : Any = frame_sampling_rate
if num_frames is not None:
lowercase_ : Optional[Any] = num_frames
lowercase_ : Union[str, Any] = {}
if top_k is not None:
lowercase_ : Optional[Any] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : str , lowercase_ : Union[str, List[str]] , **lowercase_ : str ):
return super().__call__(lowercase_ , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=None , lowercase_ : Optional[int]=1 ):
if num_frames is None:
lowercase_ : List[Any] = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
lowercase_ : Union[str, Any] = BytesIO(requests.get(lowercase_ ).content )
lowercase_ : Optional[Any] = VideoReader(lowercase_ )
videoreader.seek(0 )
lowercase_ : Tuple = 0
lowercase_ : List[Any] = num_frames * frame_sampling_rate - 1
        lowercase_ : Optional[int] = np.linspace(lowercase_ , lowercase_ , num=lowercase_ , dtype=np.int64 )
lowercase_ : Optional[int] = videoreader.get_batch(lowercase_ ).asnumpy()
lowercase_ : Union[str, Any] = list(lowercase_ )
lowercase_ : Optional[Any] = self.image_processor(lowercase_ , return_tensors=self.framework )
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : str ):
lowercase_ : int = self.model(**lowercase_ )
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[Any] , lowercase_ : Dict=5 ):
if top_k > self.model.config.num_labels:
lowercase_ : List[Any] = self.model.config.num_labels
if self.framework == "pt":
lowercase_ : str = model_outputs.logits.softmax(-1 )[0]
lowercase_ , lowercase_ : Optional[Any] = probs.topk(lowercase_ )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
lowercase_ : Union[str, Any] = scores.tolist()
lowercase_ : Tuple = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
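# Minimal usage sketch for the pipeline defined above (illustrative: the
# checkpoint is a public VideoMAE model, the video path is a placeholder, and
# `decord` must be installed):
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
    # Returns [{"score": ..., "label": ...}, ...] for the top_k classes
    print(classifier("path/to/video.mp4", top_k=3))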
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : List[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_lowercase : Optional[Any] = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_lowercase : Any = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_lowercase : Optional[Any] = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_lowercase : Optional[int] = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
_lowercase : Optional[int] = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
_lowercase : Tuple = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
_lowercase : List[Any] = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_lowercase : Dict = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_lowercase : List[str] = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowercase : Optional[int] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_lowercase : Union[str, Any] = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_lowercase : int = r"\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `'tf'`: Return TensorFlow `tf.constant` objects.\n            - `'pt'`: Return PyTorch `torch.Tensor` objects.\n            - `'np'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer's default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    "
@add_start_docstrings(_UpperCAmelCase)
class __magic_name__ :
def __call__( self : int , lowercase_ : int , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , lowercase_ : Union[bool, str] = False , lowercase_ : Union[bool, str] = False , lowercase_ : Optional[int] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[bool] = None , **lowercase_ : int , ):
if titles is None and texts is None:
return super().__call__(
lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
elif titles is None or texts is None:
lowercase_ : Tuple = titles if texts is None else texts
return super().__call__(
lowercase_ , lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
lowercase_ : List[Any] = titles if not isinstance(lowercase_ , lowercase_ ) else [titles]
lowercase_ : Dict = texts if not isinstance(lowercase_ , lowercase_ ) else [texts]
lowercase_ : List[Any] = len(lowercase_ )
lowercase_ : Optional[Any] = questions if not isinstance(lowercase_ , lowercase_ ) else [questions] * n_passages
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
                f'''There should be as many titles as texts but got {len(lowercase_ )} titles and {len(lowercase_ )} texts.''' )
lowercase_ : Any = super().__call__(lowercase_ , lowercase_ , padding=lowercase_ , truncation=lowercase_ )["""input_ids"""]
lowercase_ : str = super().__call__(lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ )["""input_ids"""]
lowercase_ : List[Any] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowercase_ , lowercase_ )
]
}
if return_attention_mask is not False:
lowercase_ : int = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase_ : int = attention_mask
return self.pad(lowercase_ , padding=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : BatchEncoding , lowercase_ : DPRReaderOutput , lowercase_ : int = 16 , lowercase_ : int = 64 , lowercase_ : int = 4 , ):
lowercase_ : Optional[int] = reader_input["""input_ids"""]
lowercase_ , lowercase_ , lowercase_ : Optional[int] = reader_output[:3]
lowercase_ : Union[str, Any] = len(lowercase_ )
lowercase_ : List[Any] = sorted(range(lowercase_ ) , reverse=lowercase_ , key=relevance_logits.__getitem__ )
lowercase_ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
lowercase_ : Optional[Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase_ : Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase_ : Any = sequence_ids.index(self.pad_token_id )
else:
lowercase_ : Dict = len(lowercase_ )
lowercase_ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowercase_ , top_spans=lowercase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowercase_ , start_index=lowercase_ , end_index=lowercase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowercase_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[int] , lowercase_ : List[int] , lowercase_ : int , lowercase_ : int , ):
lowercase_ : Tuple = []
for start_index, start_score in enumerate(lowercase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        lowercase_ : str = sorted(lowercase_ , key=lambda x : x[1] , reverse=lowercase_ )
lowercase_ : Dict = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' )
lowercase_ : Optional[int] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowercase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase__ = ['''input_ids''', '''attention_mask''']
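# Usage sketch for the reader tokenizer defined above; transformers exposes it
# as `DPRReaderTokenizer`. The question/title/text strings here are made up.
if __name__ == "__main__":
    from transformers import DPRReaderTokenizer

    tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded = tokenizer(
        questions="What does DPR stand for?",
        titles="Dense Passage Retrieval",
        texts="DPR stands for Dense Passage Retrieval.",
        return_tensors="pt",
    )
    print(encoded["input_ids"].shape)  # (n_passages, sequence_length)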
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> List[str]:
if isinstance(UpperCAmelCase__ , collections.abc.Iterable ):
return x
return (x, x)
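# The helper above mirrors `to_2tuple`: iterables pass through unchanged,
# while scalars are duplicated into an (x, x) pair (used below to normalize
# image and patch sizes).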
@require_flax
class __magic_name__ :
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any , lowercase_ : str ):
pass
def SCREAMING_SNAKE_CASE_ ( self : str ):
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float ):
lowercase_ : Optional[Any] = np.abs((a - b) ).max()
self.assertLessEqual(lowercase_ , lowercase_ , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Tuple=None , **lowercase_ : Optional[int] ):
lowercase_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
lowercase_ : Any = FlaxVisionTextDualEncoderModel(lowercase_ )
lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[Any]=None , **lowercase_ : Tuple ):
lowercase_ , lowercase_ : Any = self.get_vision_text_model(lowercase_ , lowercase_ )
lowercase_ : Optional[int] = {"""vision_model""": vision_model, """text_model""": text_model}
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=None , **lowercase_ : int ):
lowercase_ , lowercase_ : Union[str, Any] = self.get_vision_text_model(lowercase_ , lowercase_ )
lowercase_ : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model}
lowercase_ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
lowercase_ : Tuple = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
lowercase_ : Any = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase_ )
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ )
lowercase_ : List[str] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
lowercase_ : Union[str, Any] = after_output[0]
            lowercase_ : str = np.amax(np.abs(out_2 - out_1 ) )
self.assertLessEqual(lowercase_ , 1E-3 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Dict=None , **lowercase_ : Optional[Any] ):
lowercase_ , lowercase_ : Optional[int] = self.get_vision_text_model(lowercase_ , lowercase_ )
lowercase_ : Dict = {"""vision_model""": vision_model, """text_model""": text_model}
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
lowercase_ : Optional[int] = model(
input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ )
lowercase_ : Tuple = output.vision_model_output.attentions
self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        lowercase_ : List[str] = to_2tuple(vision_model.config.image_size )
        lowercase_ : Optional[Any] = to_2tuple(vision_model.config.patch_size )
lowercase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowercase_ : Optional[Any] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowercase_ : Union[str, Any] = output.text_model_output.attentions
self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int ):
pt_model.to(lowercase_ )
pt_model.eval()
# prepare inputs
lowercase_ : int = inputs_dict
lowercase_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowercase_ : str = pt_model(**lowercase_ ).to_tuple()
lowercase_ : Optional[Any] = fx_model(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowercase_ )
lowercase_ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ , from_pt=lowercase_ )
lowercase_ : Dict = fx_model_loaded(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowercase_ )
lowercase_ : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(lowercase_ , from_flax=lowercase_ )
pt_model_loaded.to(lowercase_ )
pt_model_loaded.eval()
with torch.no_grad():
lowercase_ : List[Any] = pt_model_loaded(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowercase_ , pt_output_loaded.numpy() , 4E-2 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Union[str, Any] ):
lowercase_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
lowercase_ : List[Any] = VisionTextDualEncoderModel(lowercase_ )
lowercase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel(lowercase_ )
lowercase_ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ )
lowercase_ : Tuple = fx_state
self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] ):
lowercase_ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
lowercase_ : int = VisionTextDualEncoderModel(lowercase_ )
lowercase_ : Dict = FlaxVisionTextDualEncoderModel(lowercase_ )
lowercase_ : Optional[Any] = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params )
self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Tuple = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = self.prepare_config_and_inputs()
self.check_save_load(**lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowercase_ )
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Tuple = self.prepare_config_and_inputs()
lowercase_ : List[Any] = config_inputs_dict.pop("""vision_config""" )
lowercase_ : int = config_inputs_dict.pop("""text_config""" )
lowercase_ : Optional[int] = config_inputs_dict
self.check_equivalence_pt_to_flax(lowercase_ , lowercase_ , lowercase_ )
self.check_equivalence_flax_to_pt(lowercase_ , lowercase_ , lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ , lowercase_ : str = self.get_pretrained_model_and_inputs()
lowercase_ : Dict = model_a(**lowercase_ )
lowercase_ : str = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowercase_ )
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ )
lowercase_ : str = model_a(**lowercase_ )
lowercase_ : Union[str, Any] = after_outputs[0]
            lowercase_ : Any = np.amax(np.abs(out_2 - out_1 ) )
self.assertLessEqual(lowercase_ , 1E-5 )
@require_flax
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , )
lowercase_ : List[str] = 13
lowercase_ : Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase_ : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowercase_ : str = random_attention_mask([batch_size, 4] )
lowercase_ : List[str] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple ):
lowercase_ : Union[str, Any] = FlaxViTModel(lowercase_ )
lowercase_ : Dict = FlaxBertModel(lowercase_ )
return vision_model, text_model
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Any = FlaxViTModelTester(self )
lowercase_ : Optional[Any] = FlaxBertModelTester(self )
lowercase_ : Dict = vit_model_tester.prepare_config_and_inputs()
lowercase_ : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
lowercase_ , lowercase_ : List[str] = vision_config_and_inputs
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , )
lowercase_ : List[str] = 13
lowercase_ : Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase_ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowercase_ : Tuple = random_attention_mask([batch_size, 4] )
lowercase_ : Union[str, Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ):
lowercase_ : Tuple = FlaxCLIPVisionModel(lowercase_ )
lowercase_ : Any = FlaxBertModel(lowercase_ )
return vision_model, text_model
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Union[str, Any] = FlaxCLIPVisionModelTester(self )
lowercase_ : Tuple = FlaxBertModelTester(self )
lowercase_ : Union[str, Any] = clip_model_tester.prepare_config_and_inputs()
lowercase_ : Any = bert_model_tester.prepare_config_and_inputs()
lowercase_ , lowercase_ : Optional[Any] = vision_config_and_inputs
lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
lowercase_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
lowercase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowercase_ : Optional[int] = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=lowercase_ , padding=lowercase_ , return_tensors="""np""" )
lowercase_ : List[str] = model(**lowercase_ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowercase_ : Optional[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowercase_ , atol=1E-3 ) )
'''simple docstring'''
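# Project Euler problem 40: build Champernowne's constant 0.123456789101112...
# by concatenating the positive integers, then multiply the digits
# d_1 * d_10 * d_100 * ... * d_1_000_000. Note the zero-based indexing in the
# return expression below: constant[n - 1] is d_n.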
def lowerCamelCase ( ) -> Dict:
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = 1
while len(UpperCAmelCase__ ) < 1e6:
constant.append(str(UpperCAmelCase__ ) )
i += 1
lowercase_ : int = """""".join(UpperCAmelCase__ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution())
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class __magic_name__ ( unittest.TestCase):
def __init__( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : int=7 , lowercase_ : Optional[Any]=3 , lowercase_ : Optional[Any]=18 , lowercase_ : List[Any]=30 , lowercase_ : int=400 , lowercase_ : Dict=True , lowercase_ : List[Any]=None , lowercase_ : Dict=True , ):
lowercase_ : Tuple = size if size is not None else {"""height""": 18, """width""": 18}
lowercase_ : List[str] = parent
lowercase_ : Any = batch_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : Tuple = image_size
lowercase_ : Optional[Any] = min_resolution
lowercase_ : Dict = max_resolution
lowercase_ : Optional[int] = do_resize
lowercase_ : Optional[Any] = size
lowercase_ : Union[str, Any] = do_normalize
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = ImageGPTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Optional[int] = ImageGPTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """clusters""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
lowercase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : int = self.image_processing_class(**self.image_processor_dict )
lowercase_ : Union[str, Any] = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : str = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ : Union[str, Any] = os.path.join(lowercase_ , """image_processor.json""" )
image_processor_first.to_json_file(lowercase_ )
lowercase_ : Optional[Any] = self.image_processing_class.from_json_file(lowercase_ ).to_dict()
lowercase_ : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowercase_ )
lowercase_ : Any = self.image_processing_class.from_pretrained(lowercase_ ).to_dict()
lowercase_ : List[str] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowercase_ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
def lowerCamelCase ( ) -> Any:
lowercase_ : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
lowercase_ : Any = Image.open(dataset[4]["""file"""] )
lowercase_ : Dict = Image.open(dataset[5]["""file"""] )
lowercase_ : int = [imagea, imagea]
return images
@require_vision
@require_torch
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[Any] = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
lowercase_ : Optional[int] = prepare_images()
# test non-batched
lowercase_ : str = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
lowercase_ : Tuple = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase_ )
# test batched
lowercase_ : List[str] = image_processing(lowercase_ , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
lowercase_ : Union[str, Any] = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase_ )
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_lowercase : Dict = sys.version_info >= (3, 10)
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : List[Any]=None ) -> List[Any]:
return field(default_factory=lambda: default , metadata=UpperCAmelCase__ )
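# The helper above mirrors `list_field`: it wraps `dataclasses.field` with a
# `default_factory` so the mutable list defaults used by the dataclasses below
# are created per instance rather than shared.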
@dataclass
class __magic_name__ :
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = 42
@dataclass
class __magic_name__ :
UpperCamelCase__ = 42
UpperCamelCase__ = field(default='''toto''', metadata={'''help''': '''help message'''})
@dataclass
class __magic_name__ :
UpperCamelCase__ = False
UpperCamelCase__ = True
UpperCamelCase__ = None
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''titi'''
UpperCamelCase__ = '''toto'''
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''titi'''
UpperCamelCase__ = '''toto'''
UpperCamelCase__ = 42
@dataclass
class __magic_name__ :
UpperCamelCase__ = "toto"
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Optional[Any] = BasicEnum(self.foo )
@dataclass
class __magic_name__ :
UpperCamelCase__ = "toto"
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Optional[Any] = MixedTypeEnum(self.foo )
@dataclass
class __magic_name__ :
UpperCamelCase__ = None
UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''help message'''})
UpperCamelCase__ = None
UpperCamelCase__ = list_field(default=[])
UpperCamelCase__ = list_field(default=[])
@dataclass
class __magic_name__ :
UpperCamelCase__ = list_field(default=[])
UpperCamelCase__ = list_field(default=[1, 2, 3])
UpperCamelCase__ = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''])
UpperCamelCase__ = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class __magic_name__ :
UpperCamelCase__ = field()
UpperCamelCase__ = field()
UpperCamelCase__ = field()
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[Any] = BasicEnum(self.required_enum )
@dataclass
class __magic_name__ :
UpperCamelCase__ = 42
UpperCamelCase__ = field()
UpperCamelCase__ = None
UpperCamelCase__ = field(default='''toto''', metadata={'''help''': '''help message'''})
UpperCamelCase__ = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''])
if is_python_no_less_than_3_10:
@dataclass
class __magic_name__ :
UpperCamelCase__ = False
UpperCamelCase__ = True
UpperCamelCase__ = None
@dataclass
class __magic_name__ :
UpperCamelCase__ = None
UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''help message'''})
UpperCamelCase__ = None
UpperCamelCase__ = list_field(default=[])
UpperCamelCase__ = list_field(default=[])
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : argparse.ArgumentParser , lowercase_ : argparse.ArgumentParser ):
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowercase_ : str = {k: v for k, v in vars(lowercase_ ).items() if k != """container"""}
lowercase_ : int = {k: v for k, v in vars(lowercase_ ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , lowercase_ ) and yy.get("""choices""" , lowercase_ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](lowercase_ ) , yy["""type"""](lowercase_ ) )
del xx["type"], yy["type"]
self.assertEqual(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Dict = HfArgumentParser(lowercase_ )
lowercase_ : Tuple = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowercase_ , required=lowercase_ )
expected.add_argument("""--bar""" , type=lowercase_ , required=lowercase_ )
expected.add_argument("""--baz""" , type=lowercase_ , required=lowercase_ )
expected.add_argument("""--flag""" , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs="""?""" )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase_ : int = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((lowercase_) , ) : Union[str, Any] = parser.parse_args_into_dataclasses(lowercase_ , look_for_args_file=lowercase_ )
self.assertFalse(example.flag )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Dict = HfArgumentParser(lowercase_ )
lowercase_ : List[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=lowercase_ )
expected.add_argument("""--baz""" , default="""toto""" , type=lowercase_ , help="""help message""" )
self.argparsersEqual(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : List[str] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs="""?""" )
expected.add_argument("""--baz""" , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=lowercase_ , dest="""baz""" )
expected.add_argument("""--opt""" , type=lowercase_ , default=lowercase_ )
lowercase_ : int = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_ )
for dataclass_type in dataclass_types:
lowercase_ : Optional[Any] = HfArgumentParser(lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase_ : Optional[Any] = parser.parse_args([] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase_ : List[Any] = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase_ : Optional[int] = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase_ : Tuple = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase_ : Any = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Dict = HfArgumentParser(lowercase_ )
lowercase_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
lowercase_ : int = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowercase_ : str = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
lowercase_ : Tuple = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowercase_ : Union[str, Any] = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
lowercase_ : Union[str, Any] = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
@dataclass
class __magic_name__ :
UpperCamelCase__ = "toto"
lowercase_ : Optional[Any] = HfArgumentParser(lowercase_ )
lowercase_ : Any = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase_ : List[Any] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
lowercase_ : Optional[Any] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
lowercase_ : Optional[int] = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : int = HfArgumentParser(lowercase_ )
lowercase_ : List[str] = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=lowercase_ )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=lowercase_ )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowercase_ )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase_ : Dict = parser.parse_args([] )
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
lowercase_ : List[str] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Dict = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=lowercase_ , type=lowercase_ )
expected.add_argument("""--bar""" , default=lowercase_ , type=lowercase_ , help="""help message""" )
expected.add_argument("""--baz""" , default=lowercase_ , type=lowercase_ )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=lowercase_ )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=lowercase_ )
lowercase_ : Dict = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_ )
for dataclass_type in dataclass_types:
lowercase_ : Any = HfArgumentParser(lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase_ : Dict = parser.parse_args([] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[] ) )
lowercase_ : int = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = HfArgumentParser(lowercase_ )
lowercase_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=lowercase_ , required=lowercase_ )
expected.add_argument("""--required_str""" , type=lowercase_ , required=lowercase_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Any = HfArgumentParser(lowercase_ )
lowercase_ : Tuple = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowercase_ , required=lowercase_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowercase_ , )
expected.add_argument("""--opt""" , type=lowercase_ , default=lowercase_ )
expected.add_argument("""--baz""" , default="""toto""" , type=lowercase_ , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Any = HfArgumentParser(lowercase_ )
lowercase_ : List[str] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
lowercase_ : str = parser.parse_dict(lowercase_ )[0]
lowercase_ : Any = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : str = HfArgumentParser(lowercase_ )
lowercase_ : Any = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[int] = HfArgumentParser(lowercase_ )
lowercase_ : Optional[Any] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : List[str] = os.path.join(lowercase_ , """temp_json""" )
os.mkdir(lowercase_ )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
lowercase_ : Union[str, Any] = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Dict = HfArgumentParser(lowercase_ )
lowercase_ : int = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Any = os.path.join(lowercase_ , """temp_yaml""" )
os.mkdir(lowercase_ )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(lowercase_ , lowercase_ )
lowercase_ : Any = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
lowercase_ : Any = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = HfArgumentParser(lowercase_ )
self.assertIsNotNone(lowercase_ )
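# Usage sketch (illustrative, not part of the test suite above): HfArgumentParser
# maps dataclass fields to argparse options. `SimpleArgs` is a hypothetical dataclass.
# @dataclass
# class SimpleArgs:
#     foo: int = 1
# (parsed,) = HfArgumentParser(SimpleArgs).parse_args_into_dataclasses(["--foo", "2"])
# assert parsed.foo == 2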
"""
Project Euler problem 40: https://projecteuler.net/problem=40
Find the product of digits d_1 * d_10 * d_100 * d_1000 * d_10000 * d_100000 *
d_1000000 of Champernowne's constant 0.123456789101112...
"""


def solution() -> int:
    """Return the product of the required digits of Champernowne's constant."""
    digits = []
    i = 1
    # Concatenate the decimal representations 1, 2, 3, ... until at least one
    # million characters are available (each appended entry adds >= 1 digit).
    while len(digits) < 1e6:
        digits.append(str(i))
        i += 1
    constant = "".join(digits)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
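# Sanity note (not part of the original file): the concatenated sequence starts
# "123456789101112...", so constant[0] == "1" and constant[9] == "1" (the first
# digit of 10). The published answer to Project Euler 40 is 210, which can serve
# as a regression check:
# assert solution() == 210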
"""
Project Euler problem 85: https://projecteuler.net/problem=85
Find the area of the grid containing a number of rectangles closest to two
million; an a x b grid contains T(a) * T(b) rectangles, with T the triangle numbers.
"""
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """Return the area a * b of the grid whose rectangle count is closest to `target`."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # the product of two triangle numbers closest to target found so far
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
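# Usage sketch (illustrative; the model id is an assumption, e.g. the public
# "harmonai/maestro-150k" Dance Diffusion checkpoint):
# pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
# output = pipe(num_inference_steps=100, audio_length_in_s=4.0)
# waveform = output.audios[0]  # numpy array of shape (channels, samples)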
'''simple docstring'''
def kth_permutation(k: int, n: int) -> list[int]:
    """Return the k-th (0-indexed) lexicographic permutation of range(n),
    decoding k in the factorial number system."""
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
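# Example (illustrative): the permutations of [0, 1, 2, 3] in lexicographic
# order are indexed by k, and decoding k = 5 in the factorial number system gives:
# >>> kth_permutation(5, 4)
# [0, 3, 2, 1]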
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_lowercase : Union[str, Any] = "src/transformers"
_lowercase : str = "docs/source/en"
_lowercase : Union[str, Any] = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text between `start_prompt` and `end_prompt` in `filename`,
    returning it together with its line span and the full file contents."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Strip leading and trailing blank lines.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a CamelCase identifier into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    """Center `text` in a cell of the given `width` (emoji count as two chars)."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Build the model support table for `index.md` from the auto modules."""
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    # The first column is wider to fit the longest model name.
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is up to date and maybe fix it."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
_lowercase : Any = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_lowercase : Optional[Any] = parser.parse_args()
check_model_table(args.fix_and_overwrite)
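# Example (illustrative): camel_case_split cuts at lowercase->uppercase and
# acronym->word boundaries:
# >>> camel_case_split("TFBertModel")
# ['TF', 'Bert', 'Model']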
"""
Jacobi Iteration Method: https://en.wikipedia.org/wiki/Jacobi_method
"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve A x = b iteratively with the Jacobi method, starting from `init_val`."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise if the augmented matrix (A|b) is not strictly diagonally dominant."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
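# Example usage (illustrative), mirroring the classic 3x3 textbook system:
# coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
# constant = np.array([[2.0], [-6.0], [-4.0]])
# init_val = [0.5, -0.5, -0.5]
# jacobi_iteration_method(coefficient, constant, init_val, iterations=3)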
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
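# Usage sketch (illustrative): temporarily hide the cursor while rendering an
# interactive menu, restoring it even if an exception is raised.
# with hide():
#     draw_menu()  # hypothetical rendering function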
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
_lowercase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
_lowercase : Tuple = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
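# Example invocation (paths are placeholders; the script filename is an assumption):
# python convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/tf2/checkpoint \
#     --bert_config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/output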
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Keep the deprecated `no_*` arguments working for backward compatibility
        by translating them to their positive counterparts."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTYPES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature: stores images as bytes/paths and decodes them to `PIL.Image.Image`."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode an example into a format suitable for Arrow storage."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode an encoded example back into a PIL image."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Return the feature itself if decodable, otherwise flatten it into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed the referenced image files as bytes into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL image to bytes, keeping its native compression when possible,
    otherwise falling back to PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTYPES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTYPES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTYPES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    """Encode a list of objects into a format suitable for Arrow storage."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
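# Usage sketch (illustrative of the public datasets API):
# from datasets import Dataset, Features
# features = Features({"img": Image()})
# ds = Dataset.from_dict({"img": ["path/to/image.png"]}, features=features)
# ds[0]["img"]  # decoded lazily into a PIL.Image.Image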
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression with +, -, * and a
    division that truncates toward zero."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
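# Example (illustrative): "2 1 + 3 *" encodes (2 + 1) * 3.
# >>> evaluate_postfix(["2", "1", "+", "3", "*"])
# 9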
'''simple docstring'''
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """Multi-Level Feedback Queue scheduler: round robin on the upper queues,
    first-come-first-served on the last queue."""

    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slices of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        """Return the names of the finished processes in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        """Return the waiting times of the given processes."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        """Return the turnaround times of the given processes."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        """Return the completion times of the given processes."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        """Return the remaining burst times of the given processes."""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        """Accumulate the time a process spent waiting since it was last stopped."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        """Run all remaining processes to completion in arrival order."""
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        """Run one round-robin cycle with the given time slice; unfinished
        processes go back to the ready queue."""
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        """Run the scheduler across all queues and return the finish queue."""
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
import doctest
_lowercase : Optional[int] = Process("P1", 0, 53)
_lowercase : Any = Process("P2", 0, 17)
_lowercase : Optional[Any] = Process("P3", 0, 68)
_lowercase : Optional[Any] = Process("P4", 0, 24)
_lowercase : Optional[Any] = 3
_lowercase : List[str] = [17, 25]
_lowercase : int = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
_lowercase : Dict = Process("P1", 0, 53)
_lowercase : List[str] = Process("P2", 0, 17)
_lowercase : Any = Process("P3", 0, 68)
_lowercase : List[Any] = Process("P4", 0, 24)
_lowercase : str = 3
_lowercase : List[str] = [17, 25]
_lowercase : Optional[Any] = deque([Pa, Pa, Pa, Pa])
_lowercase : Dict = MLFQ(number_of_queues, time_slices, queue, 0)
_lowercase : Union[str, Any] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shape in TensorFlow cleanly: return static dimensions
    where known and dynamic tensor entries elsewhere."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """Stable wrapper around tf.nn.softmax (the tiny additive constant works
    around an XLA reliability issue while leaving the output unchanged)."""
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Turn a {0, 1} attention mask into an additive mask suitable for the encoder."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    """Assert that all token indices are strictly below the embedding layer's input dimension."""
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    """Save attributes (data) of the specified name into the HDF5 group,
    chunking the array when it exceeds the HDF5 object-header limit."""
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    """Load attributes of the specified name from the HDF5 group, re-joining
    chunked attributes saved by `save_attributes_to_hdf5_group`."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    """Expand 1-dimensional tensors in a nested structure to at least 2 dimensions."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
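# Example (illustrative): for a concrete tensor every entry returned by
# shape_list is a static Python int.
# t = tf.zeros((2, 3, 8))
# shape_list(t)  # -> [2, 3, 8]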
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5,
                num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32,
                intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
                pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size, layers_per_block=1,
            upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012,
            prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(input_image, "anime turtle", num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Möbius function: 0 if number has a squared prime factor, otherwise (-1)**k for k prime factors."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
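
# Quick sanity checks (illustrative, assuming prime_factors returns the factors
# with multiplicity, e.g. prime_factors(12) == [2, 2, 3]):
#   mobius(4) == 0    # 4 = 2 * 2 has a squared prime factor
#   mobius(10) == 1   # 10 = 2 * 5, an even number of distinct primes
#   mobius(7) == -1   # a single prime factor, odd count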
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2000000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(f"""{solution() = }""")
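
# Worked example (illustrative): solution(10) sums the primes below 10,
# i.e. 2 + 3 + 5 + 7 == 17. The 6k +/- 1 trick in is_prime works because any
# integer can be written as 6k + r with r in {0, 1, 2, 3, 4, 5}, and 6k, 6k+2,
# 6k+3, 6k+4 are all divisible by 2 or 3, leaving only 6k +/- 1 as candidates.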
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d; also a < 4d keeps n positive
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f"""{solution() = }""")
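
# Worked example (illustrative): writing x, y, z as the arithmetic progression
# y + d, y, y - d gives n = x**2 - y**2 - z**2 = y * (4d - y), so 4d = y + n / y.
# For n = 27 and first_term y = 27: 4d = 28, d = 7, and y > d, y < 4d both hold,
# yielding the solution 34**2 - 27**2 - 20**2 == 27.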
'''simple docstring'''
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
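
# Example session (illustrative): for the input "a+b*c" the functions produce
#   infix_2_postfix("a+b*c") -> "abc*+"
#   infix_2_prefix("a+b*c")  -> "+a*bc"
# alongside the printed trace table of the stack and the postfix string.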
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
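

# Minimal usage sketch of the API under test (illustrative, not part of the
# test suite): a GenerationConfig round-trips through save/load and reports
# unused kwargs on update().
#
#   config = GenerationConfig(do_sample=True, temperature=0.7)
#   unused = config.update(max_new_tokens=256, foo="bar")  # unused == {"foo": "bar"}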
'''simple docstring'''
def circle_sort(collection: list) -> list:
    """Sorts a list in place using the circle sort algorithm and returns it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
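
# Example (illustrative): circle_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5];
# each pass compares mirrored pairs (first/last, second/second-last, ...) and
# recurses on both halves until a full pass makes no swaps.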
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
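
# Example invocation (illustrative; the script name and all paths are
# placeholders):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin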
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
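
# What _distribute_shards computes (illustrative): contiguous ranges of shard
# indices, as even as possible, e.g. 10 shards over at most 3 jobs gives
# [range(0, 4), range(4, 7), range(7, 10)] -- the first job takes the remainder.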
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using the image's native compression if possible, otherwise PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
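

# Minimal encode/decode round trip (illustrative, assuming Pillow is installed):
#
#   import numpy as np
#   feature = Image()
#   encoded = feature.encode_example(np.zeros((32, 32, 3), dtype=np.uint8))
#   pil_image = feature.decode_example(encoded)  # a 32x32 black PIL image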
'''simple docstring'''
def triangle_number_generator():
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    """Returns the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
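
# Worked example (illustrative): the triangle numbers run 1, 3, 6, 10, 15, 21,
# 28, ... and 28 = 2**2 * 7 has (2 + 1) * (1 + 1) = 6 divisors, matching
# count_divisors(28); the answer is the first such number with more than 500.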
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
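
# Worked example (illustrative): get_distance(0, 0, 50) iterates z = z**2 + c
# with c = 0, which never diverges, so the loop runs to step 49 and returns
# 49 / 49 == 1.0; that pixel is colored black. Points that escape early return
# a small fraction instead and get a hue or white, depending on the coloring.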
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherically interpolate between two (numpy or torch) vectors v0 and v1."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly colinear: fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
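
# Quick numeric check (illustrative): for orthogonal unit vectors the spherical
# interpolation at t = 0.5 bisects the arc, e.g.
#   slerp(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
# is approximately [0.7071, 0.7071], which is still a unit vector.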
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
def __call__( self : List[str] , lowercase_ : Union[torch.FloatTensor, PIL.Image.Image] , lowercase_ : Union[torch.FloatTensor, PIL.Image.Image] , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , lowercase_ : Optional[int] = 512 , lowercase_ : Optional[int] = 512 , lowercase_ : float = 0.6 , lowercase_ : Optional[int] = 50 , lowercase_ : Optional[float] = 7.5 , lowercase_ : Optional[int] = 1 , lowercase_ : float = 0.0 , lowercase_ : Optional[float] = 100 , lowercase_ : Optional[torch.Generator] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , lowercase_ : float = 0.8 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , ):
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(lowercase_ )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(lowercase_ , torch.Generator ) and batch_size > 1:
lowercase_ : List[Any] = [generator] + [None] * (batch_size - 1)
lowercase_ : Optional[int] = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
lowercase_ : Dict = [x[0] for x in coca_is_none if x[1]]
lowercase_ : Tuple = """, """.join(lowercase_ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowercase_ ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
lowercase_ : int = self.get_image_description(lowercase_ )
if style_prompt is None:
if len(lowercase_ ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
lowercase_ : int = self.get_image_description(lowercase_ )
# get prompt text embeddings for content and style
lowercase_ : List[Any] = self.tokenizer(
lowercase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=lowercase_ , return_tensors="""pt""" , )
lowercase_ : Union[str, Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowercase_ : List[str] = self.tokenizer(
lowercase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=lowercase_ , return_tensors="""pt""" , )
lowercase_ : List[str] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowercase_ : Dict = slerp(lowercase_ , lowercase_ , lowercase_ )
# duplicate text embeddings for each generation per prompt
lowercase_ : str = text_embeddings.repeat_interleave(lowercase_ , dim=0 )
# set timesteps
lowercase_ : Optional[Any] = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowercase_ : List[Any] = {}
if accepts_offset:
lowercase_ : Optional[Any] = 1
self.scheduler.set_timesteps(lowercase_ , **lowercase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
lowercase_ , lowercase_ : Optional[Any] = self.get_timesteps(lowercase_ , lowercase_ , self.device )
lowercase_ : Tuple = timesteps[:1].repeat(lowercase_ )
# Preprocess image
lowercase_ : Union[str, Any] = preprocess(lowercase_ , lowercase_ , lowercase_ )
lowercase_ : Dict = self.prepare_latents(
lowercase_ , lowercase_ , lowercase_ , text_embeddings.dtype , self.device , lowercase_ )
lowercase_ : Union[str, Any] = preprocess(lowercase_ , lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = self.prepare_latents(
lowercase_ , lowercase_ , lowercase_ , text_embeddings.dtype , self.device , lowercase_ )
lowercase_ : str = slerp(lowercase_ , lowercase_ , lowercase_ )
if clip_guidance_scale > 0:
lowercase_ : List[str] = self.get_clip_image_embeddings(lowercase_ , lowercase_ )
lowercase_ : int = self.get_clip_image_embeddings(lowercase_ , lowercase_ )
lowercase_ : List[Any] = slerp(
lowercase_ , lowercase_ , lowercase_ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase_ : int = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""""""] , padding="""max_length""" , max_length=max_length , return_tensors="""pt""" )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size , dim=0 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape , generator=generator , device="""cpu""" , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            latents = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["""eta"""] = eta
        # check if the scheduler accepts generator
        accepts_generator = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        if accepts_generator:
            extra_step_kwargs["""generator"""] = generator
        with self.progress_bar(total=num_inference_steps ):
            for i, t in enumerate(timesteps ):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
                # predict the noise residual
                noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred , latents = self.cond_fn(
                        latents , t , i , text_embeddings_for_guidance , noise_pred , clip_image_embeddings , clip_guidance_scale , )
                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
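# Worked sketch of the classifier-free guidance combination used in the denoising
# loop above (torch is assumed imported at the top of this truncated module):
# with w = guidance_scale, eps = eps_uncond + w * (eps_text - eps_uncond),
# so w = 1 reduces to the conditional prediction alone.
if __name__ == "__main__":
    _eps_uncond , _eps_text , _w = torch.zeros(4 ) , torch.ones(4 ) , 7.5
    _eps = _eps_uncond + _w * (_eps_text - _eps_uncond)
    assert torch.allclose(_eps , torch.full((4,) , 7.5 ) )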
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_lowercase : Union[str, Any] = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """
    Generate a random graph
    @input: vertices_number (number of vertices),
            probability (probability that a generic edge (u,v) exists),
            directed (if True: graph will be a directed graph,
                      otherwise it will be an undirected graph)
    @example:
    >>> random.seed(1)
    >>> random_graph(4, 0.5)
    {0: [1], 1: [0, 2, 3], 2: [1, 3], 3: [1, 2]}
    """
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater than or equal to 1, generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is less than or equal to 0, return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than the probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add an edge from j to i
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """
    Generate a complete graph with vertices_number vertices.
    >>> complete_graph(3)
    {0: [1, 2], 1: [0, 2], 2: [0, 1]}
    """
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
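# Minimal usage sketch of the two generators above (edges depend on the seed):
if __name__ == "__main__":
    random.seed(1)
    print(random_graph(4, 0.5))  # {0: [1], 1: [0, 2, 3], 2: [1, 3], 3: [1, 2]}
    print(complete_graph(4))  # {0: [1, 2, 3], 1: [0, 2, 3], 2: [0, 1, 3], 3: [0, 1, 2]}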
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = '''ctrl'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self , vocab_size=246534 , n_positions=256 , n_embd=1280 , dff=8192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1E-6 , initializer_range=0.02 , use_cache=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs )
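# Minimal usage sketch: the defaults above mirror the original CTRL checkpoint,
# so a bare instantiation reproduces its 48-layer, 1280-dim configuration.
if __name__ == "__main__":
    config = CTRLConfig()
    print(config.n_layer, config.n_embd)  # 48 1280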
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    # compare with the names blanked out so that only the tensor payload matters
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from an ONNX model to reduce its size on disk.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / float64
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)
    return new_model_path
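# Usage sketch (hypothetical path): deduplicates shared initializers and writes
# "optimized_<name>.onnx" next to the input model, returning the new file path.
if __name__ == "__main__":
    print(remove_dup_initializers("/path/to/model.onnx"))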
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size : int = 1 , num_inference_steps : int = 100 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , audio_length_in_s : Optional[float] = None , return_dict : bool = True , ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
                f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
                f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
                """ process.""" )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio , t ).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output , t , audio ).prev_sample
        audio = audio.clamp(-1 , 1 ).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
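# Usage sketch (checkpoint id is an assumption; any diffusers-format UNet1D +
# scheduler pair saved with save_pretrained works the same way):
if __name__ == "__main__":
    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
    output = pipe(batch_size=1, num_inference_steps=50)
    print(output.audios.shape)  # (batch, channels, sample_size)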
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , """vision""" )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters( self , max_new_tokens=None , generate_kwargs=None , prompt=None ):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["""prompt"""] = prompt
        if generate_kwargs is not None:
            forward_kwargs["""generate_kwargs"""] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["""generate_kwargs"""] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
                    """ please use only one""" )
            forward_kwargs["""generate_kwargs"""]["""max_new_tokens"""] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self , images : Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        return super().__call__(images , **kwargs )
    def preprocess( self , image , prompt=None ):
        image = load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(prompt )} - but expected a single string. '''
                    """Note also that one single text can be provided for conditional image to text generation.""" )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({"""input_ids""": input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["""input_ids"""] = None
        return model_inputs
    def _forward( self , model_inputs , generate_kwargs=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["""input_ids"""] , list )
            and all(x is None for x in model_inputs["""input_ids"""] )
        ):
            model_inputs["""input_ids"""] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def postprocess( self , model_outputs ):
        records = []
        for output_ids in model_outputs:
            record = {
                """generated_text""": self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
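# Usage sketch (assumes this class backs the "image-to-text" task in transformers;
# the image path is a hypothetical placeholder):
if __name__ == "__main__":
    from transformers import pipeline

    captioner = pipeline("image-to-text")  # resolves the library's default checkpoint
    print(captioner("path/to/photo.png"))  # e.g. [{"generated_text": "..."}]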
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''

    def get_dummy_inputs( self , seed=0 ):
        generator = np.random.RandomState(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def test_pipeline_default_ddim( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def test_pipeline_pndm( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def test_pipeline_lms( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def test_pipeline_euler( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def test_pipeline_euler_ancestral( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def test_pipeline_dpm_multistep( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def test_stable_diffusion_prompt_embeds( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        # forward
        output = pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("""prompt""" )]
        text_inputs = pipe.tokenizer(
            prompt , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=True , return_tensors="""np""" , )
        text_inputs = text_inputs["""input_ids"""]
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
        inputs["""prompt_embeds"""] = prompt_embeds
        # forward
        output = pipe(**inputs )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1E-4

    def test_stable_diffusion_negative_prompt_embeds( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["""this is a negative prompt"""]
        inputs["""negative_prompt"""] = negative_prompt
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        # forward
        output = pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("""prompt""" )]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=True , return_tensors="""np""" , )
            text_inputs = text_inputs["""input_ids"""]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
        inputs["""prompt_embeds"""] , inputs["""negative_prompt_embeds"""] = embeds
        # forward
        output = pipe(**inputs )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider( self ):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm( self ):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = """A painting of a squirrel eating a burger"""
        np.random.seed(0 )
        output = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def test_inference_ddim( self ):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=ddim_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = """open neural network exchange"""
        generator = np.random.RandomState(0 )
        output = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="""np""" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def test_inference_k_lms( self ):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = """open neural network exchange"""
        generator = np.random.RandomState(0 )
        output = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="""np""" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def test_intermediate_state( self ):
        number_of_steps = 0

        def test_callback_fn(step : int , timestep : int , latents : np.ndarray ) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3

        test_callback_fn.has_been_called = False
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """Andromeda galaxy in a bottle"""
        generator = np.random.RandomState(0 )
        pipe(
            prompt=prompt , num_inference_steps=5 , guidance_scale=7.5 , generator=generator , callback=test_callback_fn , callback_steps=1 , )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        assert isinstance(pipe , OnnxStableDiffusionPipeline )
        assert pipe.safety_checker is None
        image = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname )
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
        assert image is not None
'''simple docstring'''
class DisjointSet:
    def __init__( self , set_counts : list ):
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )

    def merge( self , src : int , dst : int ):
        """
        Merge two sets together using the union-by-rank heuristic;
        return True if a merge happened, False if they were already joined
        """
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True

    def get_parent( self , disj_set : int ):
        """Find the parent of a given set, compressing the path as we go"""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
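# Minimal usage sketch: three sets of sizes 1, 2 and 3; merging sets 0 and 1
# gives a largest set of size 3, and pulling set 2 in raises it to 6.
if __name__ == "__main__":
    ds = DisjointSet([1, 2, 3])
    ds.merge(0 , 1 )
    assert ds.max_set == 3
    ds.merge(1 , 2 )
    assert ds.max_set == 6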
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor as a (nested) list"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1E-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs

    def prepare_inputs_for_target( self , equal_length=False , numpify=False ):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp( self ):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self )

    def _check_zero_mean_unit_variance( self , input_vector ):
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1E-3 ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
# Tests that all call wrap to encode_plus and batch_encode_plus
lowercase_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase_ : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase_ : Any = [np.asarray(lowercase_ ) for speech_input in speech_inputs]
# Test not batched input
lowercase_ : Optional[int] = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
lowercase_ : Any = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
# Test batched
lowercase_ : Dict = feat_extract(lowercase_ , return_tensors="""np""" ).input_values
lowercase_ : Optional[Any] = feat_extract(lowercase_ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase_ : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase_ : List[Any] = ["""longest""", """max_length""", """do_not_pad"""]
lowercase_ : int = [None, 1600, None]
for max_length, padding in zip(lowercase_ , lowercase_ ):
lowercase_ : Any = feat_extract(lowercase_ , padding=lowercase_ , max_length=lowercase_ , return_tensors="""np""" )
lowercase_ : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase_ : Any = range(800 , 1400 , 200 )
lowercase_ : int = [floats_list((1, x) )[0] for x in lengths]
lowercase_ : int = ["""longest""", """max_length""", """do_not_pad"""]
lowercase_ : str = [None, 1600, None]
for max_length, padding in zip(lowercase_ , lowercase_ ):
lowercase_ : Tuple = feat_extract(lowercase_ , max_length=lowercase_ , padding=lowercase_ )
lowercase_ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase_ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase_ : int = feat_extract(
lowercase_ , truncation=lowercase_ , max_length=1000 , padding="""max_length""" , return_tensors="""np""" )
lowercase_ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase_ : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase_ : str = feat_extract(
lowercase_ , truncation=lowercase_ , max_length=1000 , padding="""longest""" , return_tensors="""np""" )
lowercase_ : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
lowercase_ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase_ : Optional[int] = feat_extract(
lowercase_ , truncation=lowercase_ , max_length=2000 , padding="""longest""" , return_tensors="""np""" )
lowercase_ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
    def test_double_precision_pad( self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def SCREAMING_SNAKE_CASE_ ( self : int ):
# Tests that all call wrap to encode_plus and batch_encode_plus
lowercase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase_ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase_ : Tuple = [np.asarray(lowercase_ ) for speech_input in speech_inputs]
# Test feature size
lowercase_ : Dict = feature_extractor(audio_target=lowercase_ , padding=lowercase_ , return_tensors="""np""" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
lowercase_ : Optional[int] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values
lowercase_ : Any = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
# Test batched
lowercase_ : Dict = feature_extractor(lowercase_ , return_tensors="""np""" ).input_values
lowercase_ : int = feature_extractor(lowercase_ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowercase_ : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowercase_ : str = np.asarray(lowercase_ )
lowercase_ : Any = feature_extractor(lowercase_ , return_tensors="""np""" ).input_values
lowercase_ : Dict = feature_extractor(lowercase_ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Tuple = self.feat_extract_tester.prepare_inputs_for_target()
lowercase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ : List[str] = feat_extract.model_input_names[0]
lowercase_ : Dict = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowercase_ ) == len(lowercase_ ) for x, y in zip(lowercase_ , processed_features[input_name] ) ) )
lowercase_ : List[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowercase_ )
lowercase_ : List[str] = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
lowercase_ : List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase_ : List[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : List[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowercase_ )
lowercase_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ : Union[str, Any] = feat_extract.model_input_names[0]
lowercase_ : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
lowercase_ : Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase_ : List[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ : List[str] = self.feat_extract_tester.prepare_inputs_for_target()
lowercase_ : List[str] = feat_extract.model_input_names[0]
lowercase_ : Tuple = BatchFeature({input_name: speech_inputs} )
lowercase_ : str = feat_extract.num_mel_bins # hack!
lowercase_ : Optional[int] = feat_extract.pad(lowercase_ , padding="""longest""" , return_tensors="""np""" )[input_name]
lowercase_ : Dict = feat_extract.pad(lowercase_ , padding="""longest""" , return_tensors="""pt""" )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[Any] = self.feat_extract_dict
lowercase_ : str = True
lowercase_ : Optional[Any] = self.feature_extraction_class(**lowercase_ )
lowercase_ : str = self.feat_extract_tester.prepare_inputs_for_target()
lowercase_ : Any = [len(lowercase_ ) for x in speech_inputs]
lowercase_ : List[str] = feat_extract.model_input_names[0]
lowercase_ : Any = BatchFeature({input_name: speech_inputs} )
lowercase_ : Union[str, Any] = feat_extract.num_mel_bins # hack!
lowercase_ : Dict = feat_extract.pad(lowercase_ , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , lowercase_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Dict = self.feat_extract_dict
lowercase_ : Tuple = True
lowercase_ : int = self.feature_extraction_class(**lowercase_ )
lowercase_ : int = self.feat_extract_tester.prepare_inputs_for_target()
lowercase_ : List[Any] = [len(lowercase_ ) for x in speech_inputs]
lowercase_ : Dict = feat_extract.model_input_names[0]
lowercase_ : List[str] = BatchFeature({input_name: speech_inputs} )
lowercase_ : List[Any] = min(lowercase_ )
lowercase_ : str = feat_extract.num_mel_bins # hack!
lowercase_ : Union[str, Any] = feat_extract.pad(
lowercase_ , padding="""max_length""" , max_length=lowercase_ , truncation=lowercase_ , return_tensors="""np""" )
self.assertIn("""attention_mask""" , lowercase_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples( self , num_samples ):
        from datasets import load_dataset

        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech , return_tensors="""pt""" ).input_values
        self.assertEquals(input_values.shape , (1, 93680) )
        self.assertTrue(torch.allclose(input_values[0, :30] , EXPECTED_INPUT_VALUES , atol=1E-6 ) )
    def test_integration_target( self ):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
-3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
-3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech , return_tensors="""pt""" ).input_values
        self.assertEquals(input_values.shape , (1, 366, 80) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , EXPECTED_INPUT_VALUES , atol=1E-4 ) )
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , """decord""" )
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None , num_frames=None , frame_sampling_rate=None ):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["""frame_sampling_rate"""] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["""num_frames"""] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["""top_k"""] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , videos : Union[str, List[str]] , **kwargs ):
        return super().__call__(videos , **kwargs )
    def preprocess( self , video , num_frames=None , frame_sampling_rate=1 ):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith("""http://""" ) or video.startswith("""https://""" ):
            video = BytesIO(requests.get(video ).content )
        videoreader = VideoReader(video )
        videoreader.seek(0 )
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.int64 )
        video = videoreader.get_batch(indices ).asnumpy()
        video = list(video )
        model_inputs = self.image_processor(video , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV2ImageProcessor
class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """tf_padding""" ) )
        self.parent.assertTrue(hasattr(config , """depth_multiplier""" ) )
class MobileNetV2ModelTester:
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , tf_padding=True , hidden_act="relu6" , last_hidden_size=1280 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
        result = model(pixel_values , labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCamelCase__ = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
    def setUp( self ):
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : int ):
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
pass
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states ) , expected_num_stages )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
    def default_image_processor( self ):
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        model = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.24_45, -1.19_93, 0.19_05] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
        model = model.to(torch_device )
        image_processor = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]],
                [[-2.15_95, -2.09_77, -2.37_41], [-2.42_26, -2.30_28, -2.68_35], [-2.78_19, -2.59_91, -2.77_06]],
                [[4.20_58, 4.83_17, 4.76_38], [4.41_36, 5.03_61, 4.93_83], [4.50_28, 4.96_44, 4.87_34]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
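# Note: the @slow integration tests above download pretrained checkpoints and
# are skipped by default; in the transformers test suite they are enabled with
# the RUN_SLOW environment variable, e.g. `RUN_SLOW=1 python -m pytest
# tests/models/mobilenet_v2` (exact test path assumed, not part of this file).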
| 21
|
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple( x ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_flax
class __magic_name__ :
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any , lowercase_ : str ):
pass
def SCREAMING_SNAKE_CASE_ ( self : str ):
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
pass
    def assert_almost_equals( self , a : np.ndarray , b : np.ndarray , tol : float ):
        diff = np.abs((a - b) ).max()
        self.assertLessEqual(diff , tol , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Tuple=None , **lowercase_ : Optional[int] ):
lowercase_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
lowercase_ : Any = FlaxVisionTextDualEncoderModel(lowercase_ )
lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[Any]=None , **lowercase_ : Tuple ):
lowercase_ , lowercase_ : Any = self.get_vision_text_model(lowercase_ , lowercase_ )
lowercase_ : Optional[int] = {"""vision_model""": vision_model, """text_model""": text_model}
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=None , **lowercase_ : int ):
lowercase_ , lowercase_ : Union[str, Any] = self.get_vision_text_model(lowercase_ , lowercase_ )
lowercase_ : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model}
lowercase_ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
lowercase_ : Tuple = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
        out_1 = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase_ )
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ )
lowercase_ : List[str] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1E-3 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Dict=None , **lowercase_ : Optional[Any] ):
lowercase_ , lowercase_ : Optional[int] = self.get_vision_text_model(lowercase_ , lowercase_ )
lowercase_ : Dict = {"""vision_model""": vision_model, """text_model""": text_model}
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
lowercase_ : Optional[int] = model(
input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ )
lowercase_ : Tuple = output.vision_model_output.attentions
self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase_ : List[str] = to_atuple(vision_model.config.image_size )
lowercase_ : Optional[Any] = to_atuple(vision_model.config.patch_size )
lowercase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowercase_ : Optional[Any] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowercase_ : Union[str, Any] = output.text_model_output.attentions
self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int ):
pt_model.to(lowercase_ )
pt_model.eval()
# prepare inputs
lowercase_ : int = inputs_dict
lowercase_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowercase_ : str = pt_model(**lowercase_ ).to_tuple()
lowercase_ : Optional[Any] = fx_model(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowercase_ )
lowercase_ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ , from_pt=lowercase_ )
lowercase_ : Dict = fx_model_loaded(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowercase_ )
lowercase_ : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(lowercase_ , from_flax=lowercase_ )
pt_model_loaded.to(lowercase_ )
pt_model_loaded.eval()
with torch.no_grad():
lowercase_ : List[Any] = pt_model_loaded(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowercase_ , pt_output_loaded.numpy() , 4E-2 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Union[str, Any] ):
lowercase_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
lowercase_ : List[Any] = VisionTextDualEncoderModel(lowercase_ )
lowercase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel(lowercase_ )
lowercase_ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ )
lowercase_ : Tuple = fx_state
self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] ):
lowercase_ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
lowercase_ : int = VisionTextDualEncoderModel(lowercase_ )
lowercase_ : Dict = FlaxVisionTextDualEncoderModel(lowercase_ )
lowercase_ : Optional[Any] = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params )
self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Tuple = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = self.prepare_config_and_inputs()
self.check_save_load(**lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowercase_ )
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Tuple = self.prepare_config_and_inputs()
lowercase_ : List[Any] = config_inputs_dict.pop("""vision_config""" )
lowercase_ : int = config_inputs_dict.pop("""text_config""" )
lowercase_ : Optional[int] = config_inputs_dict
self.check_equivalence_pt_to_flax(lowercase_ , lowercase_ , lowercase_ )
self.check_equivalence_flax_to_pt(lowercase_ , lowercase_ , lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ , lowercase_ : str = self.get_pretrained_model_and_inputs()
lowercase_ : Dict = model_a(**lowercase_ )
        out_1 = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowercase_ )
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ )
lowercase_ : str = model_a(**lowercase_ )
            out_2 = after_outputs[0]
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1E-5 )
@require_flax
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , )
lowercase_ : List[str] = 13
lowercase_ : Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase_ : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowercase_ : str = random_attention_mask([batch_size, 4] )
lowercase_ : List[str] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple ):
lowercase_ : Union[str, Any] = FlaxViTModel(lowercase_ )
lowercase_ : Dict = FlaxBertModel(lowercase_ )
return vision_model, text_model
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Any = FlaxViTModelTester(self )
lowercase_ : Optional[Any] = FlaxBertModelTester(self )
lowercase_ : Dict = vit_model_tester.prepare_config_and_inputs()
lowercase_ : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
lowercase_ , lowercase_ : List[str] = vision_config_and_inputs
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , )
lowercase_ : List[str] = 13
lowercase_ : Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase_ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowercase_ : Tuple = random_attention_mask([batch_size, 4] )
lowercase_ : Union[str, Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ):
lowercase_ : Tuple = FlaxCLIPVisionModel(lowercase_ )
lowercase_ : Any = FlaxBertModel(lowercase_ )
return vision_model, text_model
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Union[str, Any] = FlaxCLIPVisionModelTester(self )
lowercase_ : Tuple = FlaxBertModelTester(self )
lowercase_ : Union[str, Any] = clip_model_tester.prepare_config_and_inputs()
lowercase_ : Any = bert_model_tester.prepare_config_and_inputs()
lowercase_ , lowercase_ : Optional[Any] = vision_config_and_inputs
lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
lowercase_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
lowercase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowercase_ : Optional[int] = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=lowercase_ , padding=lowercase_ , return_tensors="""np""" )
lowercase_ : List[str] = model(**lowercase_ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowercase_ : Optional[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowercase_ , atol=1E-3 ) )
| 21
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__( self , components : Collection[float] | None = None ):
        if components is None:
            components = []
        self.__components = list(components )
def __len__( self : str ):
return len(self.__components )
    def __str__( self ):
        return "(" + ",".join(map(str , self.__components ) ) + ")"
    def __add__( self , other : Vector ):
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
        else:
            raise Exception("""must have the same size""" )
    def __sub__( self , other : Vector ):
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
        else:  # error case
            raise Exception("""must have the same size""" )
    @overload
    def __mul__( self , other : float ):
        ...
    @overload
    def __mul__( self , other : Vector ):
        ...
    def __mul__( self , other : float | Vector ):
        if isinstance(other , (float, int) ):
            ans = [c * other for c in self.__components]
            return Vector(ans )
        elif isinstance(other , Vector ) and len(self ) == len(other ):
            size = len(self )
            prods = [self.__components[i] * other.component(i ) for i in range(size )]
            return sum(prods )
        else:  # error case
            raise Exception("""invalid operand!""" )
    def copy( self ):
        return Vector(self.__components )
    def component( self , i : int ):
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception("""index out of range""" )
    def change_component( self , pos : int , value : float ):
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] = value
    def euclidean_length( self ):
        if len(self.__components ) == 0:
            raise Exception("""Vector is empty""" )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )
    def angle( self , other : Vector , deg : bool = False ):
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
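# Minimal usage sketch for the Vector class above, with values chosen so the
# results can be checked by hand:
#     v = Vector([1, 2, 2])
#     len(v)                # 3
#     v.euclidean_length()  # sqrt(1 + 4 + 4) = 3.0
#     (v + v).component(0)  # 2
#     v * v                 # dot product: 1 + 4 + 4 = 9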
def zero_vector( dimension : int ) -> Vector:
    assert isinstance(dimension , int )
    return Vector([0] * dimension )
def unit_basis_vector( dimension : int , pos : int ) -> Vector:
    assert isinstance(dimension , int ) and (isinstance(pos , int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def axpy( scalar : float , x : Vector , y : Vector ) -> Vector:
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y
def random_vector( n : int , a : int , b : int ) -> Vector:
    random.seed(None )
    ans = [random.randint(a , b ) for _ in range(n )]
    return Vector(ans )
class Matrix:
    def __init__( self , matrix : list[list[float]] , w : int , h : int ):
        self.__matrix = matrix
        self.__width = w
        self.__height = h
def __str__( self : Any ):
lowercase_ : Optional[int] = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
    def __add__( self , other : Matrix ):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception("""matrix must have the same dimension!""" )
    def __sub__( self , other : Matrix ):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception("""matrices must have the same dimension!""" )
    @overload
    def __mul__( self , other : float ):
        ...
    @overload
    def __mul__( self , other : Vector ):
        ...
    def __mul__( self , other : float | Vector ):
        if isinstance(other , Vector ):  # matrix-vector
            if len(other ) == self.__width:
                ans = zero_vector(self.__height )
                for i in range(self.__height ):
                    prods = [
                        self.__matrix[i][j] * other.component(j )
                        for j in range(self.__width )
                    ]
                    ans.change_component(i , sum(prods ) )
                return ans
            else:
                raise Exception(
                    """vector must have the same size as the """
                    """number of columns of the matrix!""" )
        elif isinstance(other , (int, float) ):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(matrix , self.__width , self.__height )
        return None
    def height( self ):
        return self.__height
    def width( self ):
        return self.__width
    def component( self , x : int , y : int ):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("""change_component: indices out of bounds""" )
    def change_component( self , x : int , y : int , value : float ):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("""change_component: indices out of bounds""" )
    def minor( self , x : int , y : int ):
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()
    def cofactor( self , x : int , y : int ):
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y )
        else:
            raise Exception("""Indices out of bounds""" )
    def determinant( self ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactor_prods )
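# Minimal usage sketch for the Matrix class above, using a 2x2 example whose
# determinant is easy to verify by hand:
#     m = Matrix([[1, 2], [3, 4]], 2, 2)
#     m.determinant()          # 1*4 - 2*3 = -2
#     m.component(0, 1)        # 2
#     (m * 2).component(0, 1)  # 4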
def square_zero_matrix( n : int ) -> Matrix:
    ans : list[list[float]] = [[0] * n for _ in range(n )]
    return Matrix(ans , n , n )
def random_matrix( width : int , height : int , a : int , b : int ) -> Matrix:
    random.seed(None )
    matrix : list[list[float]] = [
        [random.randint(a , b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix , width , height )
| 21
|
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , ):
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict( self ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
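    # Context for the "clusters" entry above: ImageGPT has no text vocabulary;
    # its image processor assigns every (normalized) pixel to the nearest color
    # cluster, and those cluster indices become the model's input_ids.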
@require_torch
@require_vision
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = ImageGPTImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = ImageGPTImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """clusters""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
lowercase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        obj = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , obj[key] ) )
            else:
                self.assertEqual(obj[key] , value )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """image_processor.json""" )
            image_processor_first.to_json_file(json_file_path )
            image_processor_second = self.image_processing_class.from_json_file(json_file_path ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname )
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
def prepare_images():
    dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
    image1 = Image.open(dataset[4]["""file"""] )
    image2 = Image.open(dataset[5]["""file"""] )
    images = [image1, image2]
    return images
@require_vision
@require_torch
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
        image_processing = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0] , return_tensors="""pt""" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 1024) )
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )
        # test batched
        encoding = image_processing(images , return_tensors="""pt""" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 1024) )
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
| 21
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = ConsistencyModelPipeline
UpperCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
UpperCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
UpperCamelCase__ = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
])
    @property
    def dummy_uncond_unet( self ):
        unet = UNetaDModel.from_pretrained(
            """diffusers/consistency-models-test""" , subfolder="""test_unet""" , )
        return unet
    @property
    def dummy_cond_unet( self ):
        unet = UNetaDModel.from_pretrained(
            """diffusers/consistency-models-test""" , subfolder="""test_unet_class_cond""" , )
        return unet
    def get_dummy_components( self , class_cond=False ):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """batch_size""": 1,
            """num_inference_steps""": None,
            """timesteps""": [22, 0],
            """generator""": generator,
            """output_type""": """np""",
        }
        return inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase_ : Optional[int] = self.get_dummy_components()
lowercase_ : List[Any] = ConsistencyModelPipeline(**lowercase_ )
lowercase_ : Tuple = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : Union[str, Any] = self.get_dummy_inputs(lowercase_ )
lowercase_ : Dict = pipe(**lowercase_ ).images
assert image.shape == (1, 32, 32, 3)
lowercase_ : List[str] = image[0, -3:, -3:, -1]
lowercase_ : str = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase_ : Dict = self.get_dummy_components(class_cond=lowercase_ )
lowercase_ : List[Any] = ConsistencyModelPipeline(**lowercase_ )
lowercase_ : Union[str, Any] = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : List[str] = self.get_dummy_inputs(lowercase_ )
lowercase_ : Tuple = 0
lowercase_ : Optional[Any] = pipe(**lowercase_ ).images
assert image.shape == (1, 32, 32, 3)
lowercase_ : str = image[0, -3:, -3:, -1]
lowercase_ : Optional[int] = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase_ : int = self.get_dummy_components()
lowercase_ : Tuple = ConsistencyModelPipeline(**lowercase_ )
lowercase_ : Any = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : Union[str, Any] = self.get_dummy_inputs(lowercase_ )
lowercase_ : Dict = 1
lowercase_ : Optional[Any] = None
lowercase_ : Optional[Any] = pipe(**lowercase_ ).images
assert image.shape == (1, 32, 32, 3)
lowercase_ : Tuple = image[0, -3:, -3:, -1]
lowercase_ : str = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase_ : int = self.get_dummy_components(class_cond=lowercase_ )
lowercase_ : List[str] = ConsistencyModelPipeline(**lowercase_ )
lowercase_ : Dict = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : Optional[int] = self.get_dummy_inputs(lowercase_ )
lowercase_ : str = 1
lowercase_ : Tuple = None
lowercase_ : str = 0
lowercase_ : Optional[Any] = pipe(**lowercase_ ).images
assert image.shape == (1, 32, 32, 3)
lowercase_ : str = image[0, -3:, -3:, -1]
lowercase_ : Any = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase):
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , seed=0 , get_fixed_latents=False , device="cpu" , dtype=torch.floataa , shape=(1, 3, 64, 64) ):
        generator = torch.manual_seed(seed )
        inputs = {
            """num_inference_steps""": None,
            """timesteps""": [22, 0],
            """class_labels""": 0,
            """generator""": generator,
            """output_type""": """np""",
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed , device=device , dtype=dtype , shape=shape )
            inputs["""latents"""] = latents
        return inputs
    def get_fixed_latents( self , seed=0 , device="cpu" , dtype=torch.floataa , shape=(1, 3, 64, 64) ):
        if type(device ) == str:
            device = torch.device(device )
        generator = torch.Generator(device=device ).manual_seed(seed )
        latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        return latents
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[Any] = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
lowercase_ : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
lowercase_ : Optional[int] = ConsistencyModelPipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(torch_device=lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : Tuple = self.get_inputs()
lowercase_ : Optional[Any] = pipe(**lowercase_ ).images
assert image.shape == (1, 64, 64, 3)
lowercase_ : Optional[Any] = image[0, -3:, -3:, -1]
lowercase_ : Tuple = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Any = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
lowercase_ : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
lowercase_ : Optional[int] = ConsistencyModelPipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(torch_device=lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : Optional[Any] = self.get_inputs()
lowercase_ : List[Any] = 1
lowercase_ : Dict = None
lowercase_ : int = pipe(**lowercase_ ).images
assert image.shape == (1, 64, 64, 3)
lowercase_ : Optional[int] = image[0, -3:, -3:, -1]
lowercase_ : Dict = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : List[str] = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
lowercase_ : int = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
lowercase_ : int = ConsistencyModelPipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(torch_device=lowercase_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : Union[str, Any] = self.get_inputs(get_fixed_latents=lowercase_ , device=lowercase_ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowercase_ , enable_math=lowercase_ , enable_mem_efficient=lowercase_ ):
lowercase_ : List[Any] = pipe(**lowercase_ ).images
assert image.shape == (1, 64, 64, 3)
lowercase_ : Tuple = image[0, -3:, -3:, -1]
lowercase_ : int = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Dict = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
lowercase_ : List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
lowercase_ : Optional[int] = ConsistencyModelPipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(torch_device=lowercase_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : Union[str, Any] = self.get_inputs(get_fixed_latents=lowercase_ , device=lowercase_ )
lowercase_ : int = 1
lowercase_ : List[Any] = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowercase_ , enable_math=lowercase_ , enable_mem_efficient=lowercase_ ):
lowercase_ : Any = pipe(**lowercase_ ).images
assert image.shape == (1, 64, 64, 3)
lowercase_ : Any = image[0, -3:, -3:, -1]
lowercase_ : Optional[int] = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 21
|
'''simple docstring'''
def solution( ) -> int:
    constant = []
    i = 1
    while len(constant ) < 1e6:
        constant.append(str(i ) )
        i += 1
    constant = """""".join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution())
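# Worked example: the loop above builds the digits of Champernowne's constant
# 0.123456789101112... as one string, and the returned product of the digits
# d_1 * d_10 * d_100 * d_1000 * d_10000 * d_100000 * d_1000000 equals
# 1 * 1 * 5 * 3 * 7 * 2 * 1 = 210 (Project Euler problem 40).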
| 21
| 1
|
'''simple docstring'''
def lowerCamelCase ( number : int , iterations : int ) -> str:
    if not isinstance(iterations , int ):
        raise ValueError("""iterations must be defined as integers""" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError("""starting number must be an integer and be more than 0""" )
    if not iterations >= 1:
        raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" )
    out = """"""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(UpperCAmelCase__ )
# print(out)
number += 1
out += " "
return out
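# Example of the output format produced by the loop above (note the trailing
# space after every token):
#     lowerCamelCase(1, 7) -> "1 2 Fizz 4 Buzz Fizz 7 "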
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
_lowercase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( _UpperCAmelCase):
    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size : int = 1 , num_inference_steps : int = 100 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , audio_length_in_s : Optional[float] = None , return_dict : bool = True , ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
                f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
                f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
                """ process.""" )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio , t ).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output , t , audio ).prev_sample
        audio = audio.clamp(-1 , 1 ).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
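# Hypothetical usage sketch (the checkpoint name is a placeholder, not taken
# from this file): load a trained unconditional audio diffusion checkpoint and
# sample roughly four seconds of audio with the __call__ above:
#     pipe = DiffusionPipeline.from_pretrained("<audio-diffusion-checkpoint>")
#     audio = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0).audios[0]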
| 21
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Optional[Any] = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
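# With the sys.modules replacement above, the heavy framework-specific
# submodules are only imported on first attribute access: importing the
# package stays cheap, and e.g. resolving XLNetModel is what triggers the
# PyTorch-backed modeling module to load.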
| 21
|
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_lowercase : Union[str, Any] = "src/transformers"
_lowercase : str = "docs/source/en"
_lowercase : Union[str, Any] = "."
def _find_text_in_file( filename , start_prompt , end_prompt ):
    with open(filename , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_lowercase : int = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_lowercase : str = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_lowercase : Optional[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_lowercase : int = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
_lowercase : Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH)
def lowerCamelCase ( UpperCAmelCase__ : int ) -> Any:
lowercase_ : str = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , UpperCAmelCase__ )
return [m.group(0 ) for m in matches]
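# NOTE: the regex above splits identifiers at lower->UPPER and ACRONYM->Word
# boundaries, e.g. "BertForSequenceClassification" yields
# ["Bert", "For", "Sequence", "Classification"]; the lookup loop below then strips
# trailing words until the remaining prefix matches a known model name.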
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> List[Any]:
lowercase_ : Dict = 2 if text == """✅""" or text == """❌""" else len(UpperCAmelCase__ )
lowercase_ : List[str] = (width - text_length) // 2
lowercase_ : Dict = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowerCamelCase ( ) -> Any:
lowercase_ : int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowercase_ : Any = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
lowercase_ : int = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
lowercase_ : List[Any] = collections.defaultdict(UpperCAmelCase__ )
lowercase_ : List[str] = collections.defaultdict(UpperCAmelCase__ )
lowercase_ : Any = collections.defaultdict(UpperCAmelCase__ )
lowercase_ : Tuple = collections.defaultdict(UpperCAmelCase__ )
lowercase_ : Optional[int] = collections.defaultdict(UpperCAmelCase__ )
# Let's lookup through all transformers object (once).
for attr_name in dir(UpperCAmelCase__ ):
lowercase_ : Union[str, Any] = None
if attr_name.endswith("""Tokenizer""" ):
lowercase_ : Optional[int] = slow_tokenizers
lowercase_ : Union[str, Any] = attr_name[:-9]
elif attr_name.endswith("""TokenizerFast""" ):
lowercase_ : Optional[Any] = fast_tokenizers
lowercase_ : Dict = attr_name[:-13]
elif _re_tf_models.match(UpperCAmelCase__ ) is not None:
lowercase_ : str = tf_models
lowercase_ : str = _re_tf_models.match(UpperCAmelCase__ ).groups()[0]
elif _re_flax_models.match(UpperCAmelCase__ ) is not None:
lowercase_ : List[str] = flax_models
lowercase_ : int = _re_flax_models.match(UpperCAmelCase__ ).groups()[0]
elif _re_pt_models.match(UpperCAmelCase__ ) is not None:
lowercase_ : Tuple = pt_models
lowercase_ : Optional[int] = _re_pt_models.match(UpperCAmelCase__ ).groups()[0]
if lookup_dict is not None:
while len(UpperCAmelCase__ ) > 0:
if attr_name in model_name_to_prefix.values():
lowercase_ : int = True
break
# Try again after removing the last word in the name
lowercase_ : Optional[Any] = """""".join(camel_case_split(UpperCAmelCase__ )[:-1] )
# Let's build that table!
lowercase_ : Dict = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
lowercase_ : Optional[Any] = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
lowercase_ : Union[str, Any] = [len(UpperCAmelCase__ ) + 2 for c in columns]
lowercase_ : int = max([len(UpperCAmelCase__ ) for name in model_names] ) + 2
# Build the table per se
lowercase_ : Tuple = """|""" + """|""".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for c, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + """|\n"""
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n"
lowercase_ : int = {True: """✅""", False: """❌"""}
for name in model_names:
lowercase_ : str = model_name_to_prefix[name]
lowercase_ : Any = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(UpperCAmelCase__ , UpperCAmelCase__ ) for l, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )] ) + "|\n"
return table
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any]=False ) -> str:
lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = _find_text_in_file(
filename=os.path.join(UpperCAmelCase__ , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , )
lowercase_ : Dict = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(UpperCAmelCase__ , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"""The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
_lowercase : Any = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_lowercase : Optional[Any] = parser.parse_args()
check_model_table(args.fix_and_overwrite)
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_lowercase : List[Any] = None
_lowercase : Tuple = logging.get_logger(__name__)
_lowercase : int = "▁"
_lowercase : Dict = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_lowercase : Any = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
_lowercase : Optional[int] = {
"google/pegasus-xsum": 512,
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = PegasusTokenizer
UpperCamelCase__ = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[int] , lowercase_ : int=None , lowercase_ : List[Any]=None , lowercase_ : str="<pad>" , lowercase_ : List[str]="</s>" , lowercase_ : int="<unk>" , lowercase_ : List[str]="<mask_2>" , lowercase_ : Union[str, Any]="<mask_1>" , lowercase_ : Union[str, Any]=None , lowercase_ : List[Any]=103 , **lowercase_ : Union[str, Any] , ):
lowercase_ : Union[str, Any] = offset
if additional_special_tokens is not None:
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowercase_ )}, but is'''
f''' {type(lowercase_ )}''' )
lowercase_ : int = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowercase_ ) , self.offset - 1 )
]
if len(set(lowercase_ ) ) != len(lowercase_ ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
lowercase_ : Optional[int] = additional_special_tokens_extended
else:
lowercase_ : Optional[int] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , pad_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , mask_token=lowercase_ , mask_token_sent=lowercase_ , offset=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
lowercase_ : List[str] = vocab_file
lowercase_ : List[str] = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Dict ):
lowercase_ : List[Any] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"""There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List , lowercase_ : Optional[List] = None , lowercase_ : bool = False ):
if already_has_special_tokens:
return self._special_token_mask(lowercase_ )
elif token_ids_a is None:
return self._special_token_mask(lowercase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Union[str, Any]=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowercase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : List[Any] = os.path.join(
lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
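# Example (hypothetical ids): this fast Pegasus tokenizer adds no BOS token, only
# a trailing EOS, so build_inputs_with_special_tokens([5, 6, 7]) returns
# [5, 6, 7, eos_token_id], and a pair call simply concatenates both sequences
# ahead of the single EOS.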
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __magic_name__ ( ctypes.Structure):
# _fields is a specific attr expected by ctypes
UpperCamelCase__ = [('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)]
def lowerCamelCase ( ) -> List[Any]:
if os.name == "nt":
lowercase_ : List[Any] = CursorInfo()
        lowercase_ : int = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) )
        lowercase_ : List[str] = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25l""" )
sys.stdout.flush()
def lowerCamelCase ( ) -> str:
if os.name == "nt":
lowercase_ : int = CursorInfo()
        lowercase_ : Optional[Any] = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) )
        lowercase_ : Optional[int] = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25h""" )
sys.stdout.flush()
@contextmanager
def lowerCamelCase ( ) -> Any:
try:
hide_cursor()
yield
finally:
show_cursor()
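# Usage sketch (assuming the un-obfuscated names are hide_cursor/show_cursor with
# the context manager exported as `hide`):
#   with hide():
#       render_progress_bar()  # cursor hidden here, restored even on error
# On POSIX terminals this emits the DECTCEM escapes \033[?25l / \033[?25h; on
# Windows it flips the `visible` byte of the console's CONSOLE_CURSOR_INFO.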
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Any:
for i in range(0 , UpperCAmelCase__ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(""" """ , end="""""" )
for _ in range(0 , i + 1 ): # printing stars
print("""* """ , end="""""" )
print()
def lowerCamelCase ( UpperCAmelCase__ : List[Any] ) -> Any:
for i in range(UpperCAmelCase__ , 0 , -1 ):
for _ in range(UpperCAmelCase__ , 0 , -1 ): # printing stars
print("""* """ , end="""""" )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(""" """ , end="""""" )
def lowerCamelCase ( UpperCAmelCase__ : str ) -> str:
if n <= 0:
print(""" ... .... nothing printing :(""" )
return
floyd(UpperCAmelCase__ ) # upper half
reverse_floyd(UpperCAmelCase__ ) # lower half
if __name__ == "__main__":
print(r"| /\ | |- | |- |--| |\ /| |-")
print(r"|/ \| |- |_ |_ |__| | \/ | |_")
_lowercase : Dict = 1
while K:
        _lowercase : Optional[int] = int(input("enter the number, and see the magic : "))
print()
pretty_print(user_number)
_lowercase : List[str] = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...")
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
_lowercase : int = logging.get_logger(__name__)
@dataclass
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self : Optional[Any] , **lowercase_ : int ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowercase_ : Optional[int] = deprecated_arg[3:]
setattr(self , lowercase_ , not kwargs.pop(lowercase_ ) )
logger.warning(
                f'''{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'''
f''' {positive_arg}={kwargs[positive_arg]}''' )
lowercase_ : Tuple = kwargs.pop("""torchscript""" , self.torchscript )
lowercase_ : List[Any] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
lowercase_ : List[Any] = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level )
super().__init__(**lowercase_ )
UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Trace the models using torchscript'''})
UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''})
UpperCamelCase__ = field(
default='''O1''', metadata={
'''help''': (
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '''
'''See details at https://nvidia.github.io/apex/amp.html'''
)
}, )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
requires_backends(self , ["""torch"""] )
logger.info("""PyTorch: setting up devices""" )
if not self.cuda:
lowercase_ : Optional[Any] = torch.device("""cpu""" )
lowercase_ : Tuple = 0
elif is_torch_tpu_available():
lowercase_ : Optional[int] = xm.xla_device()
lowercase_ : str = 0
else:
lowercase_ : int = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
lowercase_ : str = torch.cuda.device_count()
return device, n_gpu
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return is_torch_tpu_available() and self.tpu
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
requires_backends(self , ["""torch"""] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
requires_backends(self , ["""torch"""] )
return self._setup_devices[0]
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
requires_backends(self , ["""torch"""] )
return self._setup_devices[1]
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
return self.n_gpu > 0
'''simple docstring'''
import argparse
from collections import defaultdict
def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
lowercase_ : Any = F'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(UpperCAmelCase__ , """r""" ) as f:
lowercase_ : int = f.readlines()
lowercase_ : int = F'''class {class_name}('''
lowercase_ : int = F'''{4 * ' '}def {test_name}('''
lowercase_ : Any = F'''{8 * ' '}{correct_line.split()[0]}'''
lowercase_ : Optional[int] = F'''{16 * ' '}{correct_line.split()[0]}'''
lowercase_ : Union[str, Any] = False
lowercase_ : Optional[Any] = False
lowercase_ : List[Any] = False
lowercase_ : List[Any] = False
lowercase_ : List[str] = 0
lowercase_ : List[str] = 0
lowercase_ : Optional[int] = []
for line in lines:
if line.startswith(UpperCAmelCase__ ):
lowercase_ : List[Any] = True
elif in_class and line.startswith(UpperCAmelCase__ ):
lowercase_ : Optional[int] = True
elif in_class and in_func and (line.startswith(UpperCAmelCase__ ) or line.startswith(UpperCAmelCase__ )):
lowercase_ : List[str] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
lowercase_ : Any = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
lowercase_ : Dict = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F'''{spaces * ' '}{correct_line}''' )
lowercase_ : List[Any] = False
else:
new_lines.append(UpperCAmelCase__ )
with open(UpperCAmelCase__ , """w""" ) as f:
for line in new_lines:
f.write(UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any]=None ) -> str:
if fail is not None:
with open(UpperCAmelCase__ , """r""" ) as f:
lowercase_ : Tuple = {l.strip() for l in f.readlines()}
else:
lowercase_ : List[str] = None
with open(UpperCAmelCase__ , """r""" ) as f:
lowercase_ : str = f.readlines()
lowercase_ : Union[str, Any] = defaultdict(UpperCAmelCase__ )
for line in correct_lines:
lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
_lowercase : int = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
_lowercase : Union[str, Any] = parser.parse_args()
main(args.correct_filename, args.fail_filename)
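# Expected --correct_filename format (inferred from the line.split(";") above):
# one test per line with four ;-separated fields, e.g.
#   tests/models/bert/test_modeling_bert.py;BertModelTest;test_forward;expected_slice = [1.0, 2.0]
# --fail_filename, if given, lists "file::class::test" ids, and only those
# failing tests get their expected values overwritten.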
'''simple docstring'''
from __future__ import annotations
from typing import Any
def lowerCamelCase ( UpperCAmelCase__ : list ) -> int:
if not postfix_notation:
return 0
lowercase_ : Any = {"""+""", """-""", """*""", """/"""}
lowercase_ : list[Any] = []
for token in postfix_notation:
if token in operations:
lowercase_ , lowercase_ : Dict = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(UpperCAmelCase__ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
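# Worked example: evaluate_postfix(["2", "3", "+", "4", "*"]) computes
# (2 + 3) * 4 = 20. The division branch emulates C-style truncation toward zero:
# for a = -7, b = 2, floor division gives -4, so the +1 correction applied when
# the signs differ and a remainder exists yields the intended -3.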
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : int ) -> list[int]:
if length <= 0 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError("""Length must be a positive integer.""" )
return [n * (2 * n - 1) for n in range(UpperCAmelCase__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
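# NOTE: the n-th hexagonal number is n(2n - 1), so the calls above print
# [0, 1, 6, 15, 28] and [0, 1, 6, 15, 28, 45, 66, 91, 120, 153].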
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
def lowerCamelCase ( UpperCAmelCase__ : Union[tf.Tensor, np.ndarray] ) -> List[int]:
if isinstance(UpperCAmelCase__ , np.ndarray ):
return list(tensor.shape )
lowercase_ : Tuple = tf.shape(UpperCAmelCase__ )
if tensor.shape == tf.TensorShape(UpperCAmelCase__ ):
return dynamic
lowercase_ : Dict = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase__ )]
def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[str] = None ) -> tf.Tensor:
return tf.nn.softmax(logits=logits + 1e-9 , axis=UpperCAmelCase__ , name=UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=1e-5 , UpperCAmelCase__ : List[str]=-1 ) -> List[str]:
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" )
# Get mean and variance on the axis to be normalized
lowercase_ , lowercase_ : List[str] = tf.nn.moments(UpperCAmelCase__ , axes=[axis] , keepdims=UpperCAmelCase__ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowercase_ : List[Any] = [1] * inputs.shape.rank
lowercase_ : List[str] = shape_list(UpperCAmelCase__ )[axis]
lowercase_ : List[str] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : List[Any] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ )
# Compute layer normalization using the batch_normalization
# function.
lowercase_ : str = tf.nn.batch_normalization(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , offset=UpperCAmelCase__ , scale=UpperCAmelCase__ , variance_epsilon=UpperCAmelCase__ , )
return outputs
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Any=-1 ) -> Dict:
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowercase_ : List[Any] = tf.shape(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowercase_ : Dict = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ )
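# Example: as with torch.flatten, an input of shape (2, 3, 4, 5) with
# start_dim=1, end_dim=2 is reshaped to (2, 12, 5); negative dims are first
# normalized against the input rank.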
def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor ) -> tf.Tensor:
if not isinstance(UpperCAmelCase__ , tf.Tensor ):
lowercase_ : List[Any] = tf.convert_to_tensor(UpperCAmelCase__ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowercase_ : Any = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowercase_ : List[Any] = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowercase_ : Optional[Any] = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
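# NOTE: a (batch, seq_len) padding mask of 1s and 0s comes out as a broadcastable
# (batch, 1, 1, seq_len) tensor holding 0 where attention is allowed and
# dtype.min where it is masked, ready to be added to raw attention scores.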
def lowerCamelCase ( UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : int , UpperCAmelCase__ : str = "input_ids" ) -> None:
tf.debugging.assert_less(
UpperCAmelCase__ , tf.cast(UpperCAmelCase__ , dtype=tensor.dtype ) , message=(
F'''The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase__ )}) must be smaller than the embedding '''
F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Any:
lowercase_ : int = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowercase_ : Optional[Any] = [x for x in data if len(UpperCAmelCase__ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"""The following attributes cannot be saved to HDF5 file because """
F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
F'''bytes: {bad_attributes}''' )
lowercase_ : Any = np.asarray(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = 1
lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
lowercase_ : Optional[Any] = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(UpperCAmelCase__ ):
lowercase_ : Union[str, Any] = chunk_data
else:
lowercase_ : Any = data
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ) -> str:
if name in group.attrs:
lowercase_ : Optional[Any] = [n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs[name]]
else:
lowercase_ : int = []
lowercase_ : Optional[int] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("""utf8""" ) if hasattr(UpperCAmelCase__ , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] )
chunk_id += 1
return data
def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> Any:
def _expand_single_ad_tensor(UpperCAmelCase__ : Optional[Any] ):
if isinstance(UpperCAmelCase__ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(UpperCAmelCase__ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase__ )
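# NOTE: tf.nest.map_structure walks arbitrarily nested lists/tuples/dicts, so
# every rank-1 tensor anywhere in the structure gains a trailing axis
# (shape (8,) becomes (8, 1)) while higher-rank tensors pass through unchanged.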
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class __magic_name__ :
def __init__( self : str , lowercase_ : List[str] , ):
lowercase_ : Dict = parent
lowercase_ : Any = 13
lowercase_ : Dict = 7
lowercase_ : List[str] = True
lowercase_ : Union[str, Any] = True
lowercase_ : Any = True
lowercase_ : Tuple = 99
lowercase_ : Union[str, Any] = 32
lowercase_ : Dict = 2
lowercase_ : Any = 4
lowercase_ : Union[str, Any] = 37
lowercase_ : int = """gelu"""
lowercase_ : List[Any] = 0.1
lowercase_ : Optional[Any] = 0.1
lowercase_ : List[Any] = 512
lowercase_ : Optional[int] = 16
lowercase_ : List[Any] = 2
lowercase_ : str = 0.02
lowercase_ : Optional[int] = 3
lowercase_ : Optional[int] = 4
lowercase_ : Optional[int] = None
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : Optional[Any] = None
if self.use_input_mask:
lowercase_ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : Dict = None
lowercase_ : int = None
lowercase_ : Optional[int] = None
if self.use_labels:
lowercase_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : str = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ : Any = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = self.prepare_config_and_inputs()
lowercase_ : Optional[Any] = True
lowercase_ : Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : int ):
lowercase_ : Optional[int] = TFEsmModel(config=lowercase_ )
lowercase_ : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowercase_ : List[Any] = model(lowercase_ )
lowercase_ : Tuple = [input_ids, input_mask]
lowercase_ : Union[str, Any] = model(lowercase_ )
lowercase_ : Optional[int] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : str , lowercase_ : Any , lowercase_ : str , lowercase_ : Dict , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , ):
lowercase_ : Tuple = True
lowercase_ : int = TFEsmModel(config=lowercase_ )
lowercase_ : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
lowercase_ : str = model(lowercase_ )
lowercase_ : Union[str, Any] = [input_ids, input_mask]
lowercase_ : Optional[Any] = model(lowercase_ , encoder_hidden_states=lowercase_ )
# Also check the case where encoder outputs are not passed
lowercase_ : str = model(lowercase_ , attention_mask=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : str , lowercase_ : Any , lowercase_ : Tuple ):
lowercase_ : Union[str, Any] = TFEsmForMaskedLM(config=lowercase_ )
lowercase_ : Optional[int] = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] ):
lowercase_ : Optional[Any] = self.num_labels
lowercase_ : Union[str, Any] = TFEsmForTokenClassification(config=lowercase_ )
lowercase_ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowercase_ : List[str] = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Optional[Any] = self.prepare_config_and_inputs()
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Dict = config_and_inputs
lowercase_ : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase__ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Dict = TFEsmModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Optional[Any] = TFEsmModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@unittest.skip("""Protein models do not support embedding resizing.""" )
def SCREAMING_SNAKE_CASE_ ( self : int ):
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(lowercase_ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowercase_ : Dict = model.get_bias()
assert isinstance(lowercase_ , lowercase_ )
for k, v in name.items():
assert isinstance(lowercase_ , tf.Variable )
else:
lowercase_ : Any = model.get_output_embeddings()
assert x is None
lowercase_ : Optional[int] = model.get_bias()
assert name is None
@require_tf
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Dict = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
lowercase_ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase_ : List[Any] = model(lowercase_ )[0]
lowercase_ : Tuple = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , lowercase_ )
# compare the actual values for a slice.
lowercase_ : Optional[int] = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[str] = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
lowercase_ : Any = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowercase_ : List[Any] = model(lowercase_ )[0]
# compare the actual values for a slice.
lowercase_ : List[Any] = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowerCamelCase ( UpperCAmelCase__ : int ) -> int:
lowercase_ : Any = prime_factors(UpperCAmelCase__ )
if is_square_free(UpperCAmelCase__ ):
return -1 if len(UpperCAmelCase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
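# Worked examples: mobius(1) = 1 (zero prime factors, an even count),
# mobius(2) = -1 (one factor), mobius(6) = 1 (2 * 3 is square-free with an even
# count), and mobius(4) = 0 because 4 = 2^2 is not square-free.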
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class __magic_name__ ( unittest.TestCase):
def __init__( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : int=7 , lowercase_ : Optional[Any]=3 , lowercase_ : Optional[Any]=18 , lowercase_ : List[Any]=30 , lowercase_ : int=400 , lowercase_ : Dict=True , lowercase_ : List[Any]=None , lowercase_ : Dict=True , ):
lowercase_ : Tuple = size if size is not None else {"""height""": 18, """width""": 18}
lowercase_ : List[str] = parent
lowercase_ : Any = batch_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : Tuple = image_size
lowercase_ : Optional[Any] = min_resolution
lowercase_ : Dict = max_resolution
lowercase_ : Optional[int] = do_resize
lowercase_ : Optional[Any] = size
lowercase_ : Union[str, Any] = do_normalize
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = ImageGPTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Optional[int] = ImageGPTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """clusters""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
lowercase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : int = self.image_processing_class(**self.image_processor_dict )
lowercase_ : Union[str, Any] = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : str = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ : Union[str, Any] = os.path.join(lowercase_ , """image_processor.json""" )
image_processor_first.to_json_file(lowercase_ )
lowercase_ : Optional[Any] = self.image_processing_class.from_json_file(lowercase_ ).to_dict()
lowercase_ : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowercase_ )
lowercase_ : Any = self.image_processing_class.from_pretrained(lowercase_ ).to_dict()
lowercase_ : List[str] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowercase_ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
def lowerCamelCase ( ) -> Any:
lowercase_ : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
lowercase_ : Any = Image.open(dataset[4]["""file"""] )
lowercase_ : Dict = Image.open(dataset[5]["""file"""] )
lowercase_ : int = [imagea, imagea]
return images
@require_vision
@require_torch
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[Any] = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
lowercase_ : Optional[int] = prepare_images()
# test non-batched
lowercase_ : str = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
lowercase_ : Tuple = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase_ )
# test batched
lowercase_ : List[str] = image_processing(lowercase_ , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
lowercase_ : Union[str, Any] = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase_ )
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : int = 1000000 ) -> int:
lowercase_ : List[Any] = limit + 1
lowercase_ : Optional[Any] = [0] * limit
for first_term in range(1 , UpperCAmelCase__ ):
for n in range(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : List[Any] = first_term + n / first_term
        if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                frequency[n] += 1 # so z > 0 requires a > d, and n > 0 requires a < 4d
lowercase_ : List[Any] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any]=False ) -> str:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : Optional[Any] = len(set_a.intersection(UpperCAmelCase__ ) )
if alternative_union:
lowercase_ : Dict = len(UpperCAmelCase__ ) + len(UpperCAmelCase__ )
else:
lowercase_ : List[Any] = len(set_a.union(UpperCAmelCase__ ) )
return intersection / union
if isinstance(UpperCAmelCase__ , (list, tuple) ) and isinstance(UpperCAmelCase__ , (list, tuple) ):
lowercase_ : List[str] = [element for element in set_a if element in set_b]
if alternative_union:
lowercase_ : Optional[Any] = len(UpperCAmelCase__ ) + len(UpperCAmelCase__ )
return len(UpperCAmelCase__ ) / union
else:
lowercase_ : str = set_a + [element for element in set_b if element not in set_a]
return len(UpperCAmelCase__ ) / len(UpperCAmelCase__ )
return len(UpperCAmelCase__ ) / len(UpperCAmelCase__ )
return None
if __name__ == "__main__":
_lowercase : Union[str, Any] = {"a", "b", "c", "d", "e"}
_lowercase : Dict = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
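# Worked example for the values above: the intersection is {"c", "d", "e"}
# (size 3) and the union has 8 elements, so the script prints 0.375; with
# alternative_union=True the denominator would instead be |A| + |B| = 11.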
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __magic_name__ ( unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str ):
lowercase_ : Union[str, Any] = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ , config_name=lowercase_ )
lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , lowercase_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : int = AutoConfig.from_pretrained("""gpt2""" )
lowercase_ : List[Any] = GenerationConfig.from_model_config(lowercase_ )
lowercase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(lowercase_ , lowercase_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Optional[int] = GenerationConfig()
lowercase_ : int = {
"""max_new_tokens""": 1024,
"""foo""": """bar""",
}
lowercase_ : List[str] = copy.deepcopy(lowercase_ )
lowercase_ : Tuple = generation_config.update(**lowercase_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(lowercase_ , lowercase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(lowercase_ , {"""foo""": """bar"""} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Dict = GenerationConfig()
lowercase_ : int = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(lowercase_ )
lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
lowercase_ : List[str] = GenerationConfig.from_model_config(lowercase_ )
assert not hasattr(lowercase_ , """foo""" ) # no new kwargs should be initialized if from config
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Optional[int] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , lowercase_ )
self.assertEqual(default_config.num_beams , 1 )
lowercase_ : Dict = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , lowercase_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ )
lowercase_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , lowercase_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __magic_name__ ( unittest.TestCase):
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any ):
lowercase_ : int = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Tuple = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
lowercase_ : List[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id="""test-generation-config""" , push_to_hub=lowercase_ , use_auth_token=self._token )
lowercase_ : int = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
lowercase_ : Optional[Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowercase_ , use_auth_token=self._token )
lowercase_ : int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any]=1024 , UpperCAmelCase__ : Optional[int]=1024 , UpperCAmelCase__ : List[str]=False , **UpperCAmelCase__ : Dict ) -> Optional[int]:
lowercase_ : str = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = SeqaSeqDataset(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , type_path="""train""" , **UpperCAmelCase__ )
lowercase_ : Optional[int] = tok.pad_token_id
def get_lens(UpperCAmelCase__ : Union[str, Any] ):
lowercase_ : Optional[int] = tqdm(
DataLoader(UpperCAmelCase__ , batch_size=512 , num_workers=8 , shuffle=UpperCAmelCase__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
lowercase_ : Union[str, Any] = []
for batch in dl:
lowercase_ : List[str] = batch["""input_ids"""].ne(UpperCAmelCase__ ).sum(1 ).tolist()
lowercase_ : Any = batch["""labels"""].ne(UpperCAmelCase__ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
max_lens.append(max(UpperCAmelCase__ , UpperCAmelCase__ ) )
else:
max_lens.extend(UpperCAmelCase__ )
return max_lens
lowercase_ : Optional[Any] = get_lens(UpperCAmelCase__ )
lowercase_ : Tuple = SeqaSeqDataset(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , type_path="""val""" , **UpperCAmelCase__ )
lowercase_ : Optional[int] = get_lens(UpperCAmelCase__ )
pickle_save(UpperCAmelCase__ , train_ds.len_file )
pickle_save(UpperCAmelCase__ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] ) -> List[Any]:
# Initialise PyTorch model
lowercase_ : List[str] = FunnelConfig.from_json_file(UpperCAmelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
lowercase_ : Dict = FunnelBaseModel(UpperCAmelCase__ ) if base_model else FunnelModel(UpperCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , UpperCAmelCase__ )
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
_lowercase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
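# Example invocation (script and path names are placeholders):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin \
#       --base_model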
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Union[str, Any] = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_lowercase : Optional[List[str]] = None
_lowercase : str = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
_lowercase : Optional[int] = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class __magic_name__ :
UpperCamelCase__ = True
UpperCamelCase__ = None
# Automatically constructed
UpperCamelCase__ = "PIL.Image.Image"
UpperCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()})
UpperCamelCase__ = field(default='''Image''', init=_UpperCAmelCase, repr=_UpperCAmelCase)
def __call__( self : Tuple ):
return self.pa_type
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(lowercase_ , lowercase_ ):
lowercase_ : int = np.array(lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
return {"path": value, "bytes": None}
elif isinstance(lowercase_ , lowercase_ ):
return {"path": None, "bytes": value}
elif isinstance(lowercase_ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(lowercase_ )
elif isinstance(lowercase_ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(lowercase_ )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : dict , lowercase_ : List[str]=None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
lowercase_ : Union[str, Any] = {}
lowercase_ , lowercase_ : List[Any] = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(lowercase_ ):
lowercase_ : int = PIL.Image.open(lowercase_ )
else:
lowercase_ : str = path.split("""::""" )[-1]
try:
lowercase_ : Any = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""]
lowercase_ : Optional[Any] = token_per_repo_id.get(lowercase_ )
except ValueError:
lowercase_ : str = None
with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f:
lowercase_ : Dict = BytesIO(f.read() )
lowercase_ : Optional[Any] = PIL.Image.open(bytes_ )
else:
lowercase_ : Any = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def SCREAMING_SNAKE_CASE_ ( self : int ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
lowercase_ : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowercase_ : Any = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
lowercase_ : Optional[int] = storage.field("""bytes""" )
else:
lowercase_ : Optional[Any] = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
lowercase_ : Dict = storage.field("""path""" )
else:
lowercase_ : int = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
lowercase_ : Optional[int] = pa.array(
[encode_np_array(np.array(lowercase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
lowercase_ : Tuple = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowercase_ : Tuple = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase_ , self.pa_type )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(lowercase_ : Optional[Any] ):
with xopen(lowercase_ , """rb""" ) as f:
lowercase_ : int = f.read()
return bytes_
lowercase_ : Optional[Any] = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowercase_ : Any = pa.array(
[os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase_ , self.pa_type )
def lowerCamelCase ( ) -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
lowercase_ : int = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> bytes:
lowercase_ : Tuple = BytesIO()
if image.format in list_image_compression_formats():
lowercase_ : int = image.format
else:
lowercase_ : int = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(UpperCAmelCase__ , format=UpperCAmelCase__ )
return buffer.getvalue()
def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> dict:
if hasattr(UpperCAmelCase__ , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )}
def lowerCamelCase ( UpperCAmelCase__ : np.ndarray ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
lowercase_ : List[Any] = array.dtype
lowercase_ : int = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
lowercase_ : Dict = dtype.kind
lowercase_ : List[Any] = dtype.itemsize
lowercase_ : Any = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
lowercase_ : int = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTYPES:
lowercase_ : str = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
lowercase_ : str = dtype_byteorder + dtype_kind + str(UpperCAmelCase__ )
lowercase_ : Optional[Any] = np.dtype(UpperCAmelCase__ )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTYPES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
            F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTYPES}''' )
lowercase_ : Optional[int] = PIL.Image.fromarray(array.astype(UpperCAmelCase__ ) )
return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )}
def lowerCamelCase ( UpperCAmelCase__ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
lowercase_ , lowercase_ : Dict = first_non_null_value(UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(UpperCAmelCase__ , np.ndarray ):
lowercase_ : Union[str, Any] = no_op_if_value_is_null(UpperCAmelCase__ )
return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs]
elif isinstance(UpperCAmelCase__ , PIL.Image.Image ):
lowercase_ : int = no_op_if_value_is_null(UpperCAmelCase__ )
return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs]
else:
return objs
else:
return objs
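# --- Editor's addition: hedged usage sketch, not part of the feature code.
# Round-trips a small numpy array through the {"path", "bytes"} struct that the
# Image feature above encodes to; PNG is the default chosen for RGB-like modes.
if __name__ == "__main__":
    import numpy as _np
    from io import BytesIO as _BytesIO
    import PIL.Image as _PILImage

    _arr = _np.zeros((4, 4, 3), dtype=_np.uint8)
    _buf = _BytesIO()
    _PILImage.fromarray(_arr).save(_buf, format="PNG")
    _encoded = {"path": None, "bytes": _buf.getvalue()}
    _decoded = _PILImage.open(_BytesIO(_encoded["bytes"]))
    assert _decoded.size == (4, 4) and _decoded.mode == "RGB"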
| 21
| 1
|
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Optional[int] = logging.get_logger(__name__)
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] ) -> Dict:
lowercase_ : int = os.path.abspath(UpperCAmelCase__ )
logger.info(F'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
lowercase_ : Union[str, Any] = tf.train.list_variables(UpperCAmelCase__ )
lowercase_ : Tuple = []
lowercase_ : Dict = []
lowercase_ : Any = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
lowercase_ : int = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(F'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
lowercase_ : Optional[Any] = name[1:]
# figure out how many levels deep the name is
lowercase_ : Union[str, Any] = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(UpperCAmelCase__ )
# read data
lowercase_ : str = tf.train.load_variable(UpperCAmelCase__ , UpperCAmelCase__ )
names.append("""/""".join(UpperCAmelCase__ ) )
arrays.append(UpperCAmelCase__ )
logger.info(F'''Read a total of {len(UpperCAmelCase__ ):,} layers''' )
# Sanity check
if len(set(UpperCAmelCase__ ) ) != 1:
raise ValueError(F'''Found layer names with different depths (layer depth {list(set(UpperCAmelCase__ ) )})''' )
lowercase_ : List[Any] = list(set(UpperCAmelCase__ ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : List[Any] = full_name.split("""/""" )
lowercase_ : Tuple = model
lowercase_ : Any = []
for i, m_name in enumerate(UpperCAmelCase__ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
lowercase_ : Optional[Any] = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
lowercase_ : Dict = getattr(UpperCAmelCase__ , """embeddings""" )
lowercase_ : Tuple = getattr(UpperCAmelCase__ , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
lowercase_ : List[str] = getattr(UpperCAmelCase__ , """encoder""" )
lowercase_ : List[str] = getattr(UpperCAmelCase__ , """layer""" )
lowercase_ : str = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
lowercase_ : Union[str, Any] = getattr(UpperCAmelCase__ , """pooler""" )
lowercase_ : List[str] = getattr(UpperCAmelCase__ , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
lowercase_ : Any = getattr(UpperCAmelCase__ , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
lowercase_ : List[str] = getattr(UpperCAmelCase__ , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
lowercase_ : Optional[Any] = getattr(UpperCAmelCase__ , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
lowercase_ : int = getattr(UpperCAmelCase__ , """token_type_embeddings""" )
else:
raise ValueError(F'''Unknown embedding layer with name {full_name}''' )
trace.append("""weight""" )
lowercase_ : Tuple = getattr(UpperCAmelCase__ , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
lowercase_ : Union[str, Any] = getattr(UpperCAmelCase__ , """attention""" )
lowercase_ : List[str] = getattr(UpperCAmelCase__ , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
lowercase_ : str = getattr(UpperCAmelCase__ , """attention""" )
lowercase_ : Optional[Any] = getattr(UpperCAmelCase__ , """output""" )
lowercase_ : Dict = getattr(UpperCAmelCase__ , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
lowercase_ : Dict = getattr(UpperCAmelCase__ , """attention""" )
lowercase_ : Any = getattr(UpperCAmelCase__ , """output""" )
lowercase_ : Union[str, Any] = getattr(UpperCAmelCase__ , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
lowercase_ : Dict = getattr(UpperCAmelCase__ , """output""" )
lowercase_ : str = getattr(UpperCAmelCase__ , """dense""" )
elif m_name == "_output_layer_norm":
                # output LayerNorm
trace.extend(["""output""", """LayerNorm"""] )
lowercase_ : str = getattr(UpperCAmelCase__ , """output""" )
lowercase_ : Dict = getattr(UpperCAmelCase__ , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
lowercase_ : int = getattr(UpperCAmelCase__ , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
lowercase_ : List[str] = getattr(UpperCAmelCase__ , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
lowercase_ : List[Any] = getattr(UpperCAmelCase__ , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
lowercase_ : Tuple = getattr(UpperCAmelCase__ , """intermediate""" )
lowercase_ : str = getattr(UpperCAmelCase__ , """dense""" )
elif m_name == "_output_layer_norm":
            # NOTE: this branch is unreachable; "_output_layer_norm" is already handled above
trace.append("""output""" )
lowercase_ : List[Any] = getattr(UpperCAmelCase__ , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
lowercase_ : List[str] = getattr(UpperCAmelCase__ , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
lowercase_ : Union[str, Any] = getattr(UpperCAmelCase__ , """weight""" )
else:
logger.warning(F'''Ignored {m_name}''' )
# for certain layers reshape is necessary
lowercase_ : Union[str, Any] = """.""".join(UpperCAmelCase__ )
if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , UpperCAmelCase__ ) or re.match(
R"""(\S+)\.attention\.output\.dense\.weight""" , UpperCAmelCase__ ):
lowercase_ : str = array.reshape(pointer.data.shape )
if "kernel" in full_name:
lowercase_ : List[Any] = array.transpose()
if pointer.shape == array.shape:
lowercase_ : Union[str, Any] = torch.from_numpy(UpperCAmelCase__ )
else:
raise ValueError(
F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
F''' {array.shape}''' )
logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] ) -> List[Any]:
# Instantiate model
logger.info(F'''Loading model based on config from {config_path}...''' )
lowercase_ : List[Any] = BertConfig.from_json_file(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = BertModel(UpperCAmelCase__ )
# Load weights from checkpoint
logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Save pytorch-model
logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict() , UpperCAmelCase__ )
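# --- Editor's addition: hedged illustration of the name-walking logic above.
# The converter descends the PyTorch module tree one attribute at a time for
# each slash-separated TF variable name. Toy SimpleNamespace stand-ins only,
# not BertModel internals.
def _demo_attribute_walk():
    from types import SimpleNamespace

    weight = SimpleNamespace(shape=(2, 2))
    model = SimpleNamespace(embeddings=SimpleNamespace(LayerNorm=SimpleNamespace(weight=weight)))
    pointer = model
    for attr in ["embeddings", "LayerNorm", "weight"]:
        pointer = getattr(pointer, attr)
    assert pointer is weight

_demo_attribute_walk()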
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
_lowercase : List[str] = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 21
|
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def lowerCamelCase ( UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> float:
lowercase_ : List[Any] = x
lowercase_ : Any = y
for step in range(UpperCAmelCase__ ): # noqa: B007
lowercase_ : Dict = a * a - b * b + x
lowercase_ : str = 2 * a * b + y
lowercase_ : Optional[Any] = a_new
        # divergence is guaranteed once the squared magnitude a * a + b * b
        # exceeds 4, i.e. once |z| > 2 (the standard escape radius)
if a * a + b * b > 4:
break
return step / (max_step - 1)
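# --- Editor's addition: hedged worked example of the escape-time iteration.
# c = 0 never escapes (distance 1.0); c = 1 escapes within a few steps.
def _demo_distance(x: float, y: float, max_step: int = 50) -> float:
    a, b = x, y
    for step in range(max_step):
        a, b = a * a - b * b + x, 2 * a * b + y
        if a * a + b * b > 4:  # squared magnitude > 4, i.e. |z| > 2
            break
    return step / (max_step - 1)

assert _demo_distance(0.0, 0.0) == 1.0
assert _demo_distance(1.0, 0.0) < 0.1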
def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(UpperCAmelCase__ , 1 , 1 ) )
def lowerCamelCase ( UpperCAmelCase__ : int = 800 , UpperCAmelCase__ : int = 600 , UpperCAmelCase__ : float = -0.6 , UpperCAmelCase__ : float = 0 , UpperCAmelCase__ : float = 3.2 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : bool = True , ) -> Image.Image:
lowercase_ : Union[str, Any] = Image.new("""RGB""" , (image_width, image_height) )
lowercase_ : Tuple = img.load()
# loop through the image-coordinates
for image_x in range(UpperCAmelCase__ ):
for image_y in range(UpperCAmelCase__ ):
# determine the figure-coordinates based on the image-coordinates
lowercase_ : Any = figure_width / image_width * image_height
lowercase_ : Tuple = figure_center_x + (image_x / image_width - 0.5) * figure_width
lowercase_ : Union[str, Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height
lowercase_ : str = get_distance(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowercase_ : List[Any] = get_color_coded_rgb(UpperCAmelCase__ )
else:
lowercase_ : Dict = get_black_and_white_rgb(UpperCAmelCase__ )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_lowercase : List[str] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 21
| 1
|
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> float:
lowercase_ : List[Any] = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def lowerCamelCase ( ) -> List[Any]:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
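# --- Editor's addition: hedged self-contained check of the closed form above.
# S_n = n/2 * (2*a1 + (n-1)*d); for a1 = 1, d = 1, n = 10 this is 5 * 11 = 55.
def _arith_series_sum(first_term, common_diff, num_of_terms):
    return (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)

assert _arith_series_sum(1, 1, 10) == sum(range(1, 11)) == 55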
| 21
|
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = DistilBertTokenizer
UpperCamelCase__ = DistilBertTokenizerFast
UpperCamelCase__ = True
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : int = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
lowercase_ : str = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase_ )
lowercase_ : Optional[int] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase_ )
lowercase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowercase_ )
lowercase_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 21
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
class __magic_name__ :
def __init__( self : Any , lowercase_ : list[str] ):
lowercase_ : list[dict] = []
self.adlist.append(
{"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
for keyword in keywords:
self.add_keyword(lowercase_ )
self.set_fail_transitions()
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : int , lowercase_ : str ):
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str ):
lowercase_ : str = 0
for character in keyword:
lowercase_ : Optional[Any] = self.find_next_state(lowercase_ , lowercase_ )
if next_state is None:
self.adlist.append(
{
"""value""": character,
"""next_states""": [],
"""fail_state""": 0,
"""output""": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
lowercase_ : str = len(self.adlist ) - 1
else:
lowercase_ : Optional[int] = next_state
self.adlist[current_state]["output"].append(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(lowercase_ )
lowercase_ : Optional[Any] = 0
while q:
lowercase_ : Optional[int] = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowercase_ )
lowercase_ : Tuple = self.adlist[r]["""fail_state"""]
while (
self.find_next_state(lowercase_ , self.adlist[child]["""value"""] ) is None
and state != 0
):
lowercase_ : str = self.adlist[state]["""fail_state"""]
lowercase_ : Tuple = self.find_next_state(
lowercase_ , self.adlist[child]["""value"""] )
if self.adlist[child]["fail_state"] is None:
lowercase_ : List[Any] = 0
lowercase_ : Union[str, Any] = (
self.adlist[child]["""output"""]
+ self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
)
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : str ):
        lowercase_ : dict = {}  # maps each matched keyword to the list of its start positions
lowercase_ : Any = 0
for i in range(len(lowercase_ ) ):
while (
self.find_next_state(lowercase_ , string[i] ) is None
and current_state != 0
):
lowercase_ : int = self.adlist[current_state]["""fail_state"""]
lowercase_ : Optional[Any] = self.find_next_state(lowercase_ , string[i] )
if next_state is None:
lowercase_ : Any = 0
else:
lowercase_ : Optional[int] = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
lowercase_ : Optional[Any] = []
result[key].append(i - len(lowercase_ ) + 1 )
return result
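# --- Editor's addition: hedged reference check for the automaton's output.
# A naive O(n*m) multi-pattern search producing the same
# {keyword: [start indices]} mapping that the search method above returns.
def _naive_multi_find(text: str, keywords: list) -> dict:
    result: dict = {}
    for kw in keywords:
        start = 0
        while (idx := text.find(kw, start)) != -1:
            result.setdefault(kw, []).append(idx)
            start = idx + 1
    return result

assert _naive_multi_find("whatever", ["hat", "ever"]) == {"hat": [1], "ever": [4]}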
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_lowercase : Union[str, Any] = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
_lowercase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 21
| 1
|
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def lowerCamelCase ( UpperCAmelCase__ : Dict[str, torch.Tensor] ) -> Dict[str, torch.Tensor]:
lowercase_ : Tuple = []
lowercase_ : Any = []
lowercase_ : str = []
for rt in rc.restypes:
lowercase_ : str = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
lowercase_ : int = {name: i for i, name in enumerate(UpperCAmelCase__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
lowercase_ : int = torch.tensor(
UpperCAmelCase__ , dtype=torch.intaa , device=protein["""aatype"""].device , )
lowercase_ : Any = torch.tensor(
UpperCAmelCase__ , dtype=torch.intaa , device=protein["""aatype"""].device , )
lowercase_ : Union[str, Any] = torch.tensor(
UpperCAmelCase__ , dtype=torch.floataa , device=protein["""aatype"""].device , )
lowercase_ : str = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
lowercase_ : Any = restype_atomaa_to_atomaa[protein_aatype]
lowercase_ : List[Any] = restype_atomaa_mask[protein_aatype]
lowercase_ : List[Any] = residx_atomaa_mask
lowercase_ : List[Any] = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
lowercase_ : List[str] = restype_atomaa_to_atomaa[protein_aatype]
lowercase_ : Tuple = residx_atomaa_to_atomaa.long()
# create the corresponding mask
lowercase_ : str = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
lowercase_ : int = rc.restype_atoa[restype_letter]
lowercase_ : Optional[int] = rc.residue_atoms[restype_name]
for atom_name in atom_names:
lowercase_ : Union[str, Any] = rc.atom_order[atom_name]
lowercase_ : List[Any] = 1
lowercase_ : str = restype_atomaa_mask[protein_aatype]
lowercase_ : Tuple = residx_atomaa_mask
return protein
def lowerCamelCase ( UpperCAmelCase__ : Dict[str, torch.Tensor] ) -> Dict[str, np.ndarray]:
lowercase_ : Dict = tree_map(lambda UpperCAmelCase__ : torch.tensor(UpperCAmelCase__ , device=batch["""aatype"""].device ) , UpperCAmelCase__ , np.ndarray )
lowercase_ : Dict = tensor_tree_map(lambda UpperCAmelCase__ : np.array(UpperCAmelCase__ ) , make_atomaa_masks(UpperCAmelCase__ ) )
return out
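# --- Editor's addition: hedged sketch of the tree-map pattern used above.
# tree_map/tensor_tree_map apply a function to every leaf of a nested batch
# dict; this toy version handles plain dicts only.
def _demo_tree_map(fn, tree):
    if isinstance(tree, dict):
        return {k: _demo_tree_map(fn, v) for k, v in tree.items()}
    return fn(tree)

assert _demo_tree_map(lambda x: x + 1, {"a": 1, "b": {"c": 2}}) == {"a": 2, "b": {"c": 3}}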
| 21
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] ) -> List[Any]:
# Initialise PyTorch model
lowercase_ : List[str] = FunnelConfig.from_json_file(UpperCAmelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
lowercase_ : Dict = FunnelBaseModel(UpperCAmelCase__ ) if base_model else FunnelModel(UpperCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , UpperCAmelCase__ )
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
_lowercase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
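# --- Editor's addition: hedged usage note. The script filename and paths below
# are placeholders, not verified against the repository layout.
# python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin \
#     --base_model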
| 21
|
'''simple docstring'''
import os
import numpy
import onnx
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str ) -> Tuple:
lowercase_ : Tuple = a.name
lowercase_ : Tuple = b.name
lowercase_ : Any = """"""
lowercase_ : List[Any] = """"""
lowercase_ : List[Any] = a == b
lowercase_ : Union[str, Any] = name_a
lowercase_ : Optional[Any] = name_b
return res
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase__ , UpperCAmelCase__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase__ , UpperCAmelCase__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> int:
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ) -> List[str]:
lowercase_ : int = list(model.graph.initializer )
lowercase_ : List[str] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowercase_ : Optional[Any] = inits[i].name
lowercase_ : List[str] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : int ) -> List[str]:
lowercase_ : Dict = os.path.dirname(UpperCAmelCase__ )
lowercase_ : Optional[Any] = os.path.basename(UpperCAmelCase__ )
lowercase_ : str = onnx.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowercase_ : List[Any] = list(model.graph.initializer )
lowercase_ : int = set()
lowercase_ : int = {}
lowercase_ : str = []
lowercase_ : int = 0
for i in range(len(UpperCAmelCase__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase__ )
dup_set.add(UpperCAmelCase__ )
lowercase_ : Dict = inits[j].data_type
lowercase_ : List[str] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("""unexpected data type: """ , UpperCAmelCase__ )
total_reduced_size += mem_size
lowercase_ : int = inits[i].name
lowercase_ : List[str] = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase__ )
else:
lowercase_ : Optional[int] = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" )
lowercase_ : Tuple = sorted(UpperCAmelCase__ )
_remove_dup_initializers_from_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = """optimized_""" + model_file_name
lowercase_ : Optional[int] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
onnx.save(UpperCAmelCase__ , UpperCAmelCase__ )
return new_model
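# --- Editor's addition: hedged sketch of the size accounting above, keyed by
# ONNX TensorProto data_type codes (1=FLOAT, 6=INT32, 7=INT64, 11=DOUBLE).
def _tensor_mem_size(dims, data_type):
    import numpy as _np

    bytes_per_elem = {1: 4, 6: 4, 7: 8, 11: 8}.get(data_type, 1)
    return int(_np.prod(dims)) * bytes_per_elem

assert _tensor_mem_size([1024, 1024], 1) == 4 * 1024 * 1024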
| 21
| 1
|
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : Optional[int] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_lowercase : Optional[int] = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
_lowercase : List[Any] = {
"facebook/blenderbot_small-90M": 512,
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = BlenderbotSmallTokenizer
def __init__( self : Any , lowercase_ : List[str]=None , lowercase_ : List[Any]=None , lowercase_ : int="<|endoftext|>" , lowercase_ : int="<|endoftext|>" , lowercase_ : Optional[int]="<|endoftext|>" , lowercase_ : Tuple=False , lowercase_ : Any=True , **lowercase_ : Any , ):
super().__init__(
ByteLevelBPETokenizer(
vocab=lowercase_ , merges=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ , ) , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , **lowercase_ , )
lowercase_ : Union[str, Any] = add_prefix_space
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any , lowercase_ : Any=None ):
lowercase_ : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
lowercase_ : Dict = [self.sep_token_id]
lowercase_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
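# --- Editor's addition: hedged sketch of the special-token layout produced by
# build_inputs_with_special_tokens above: [bos] A [eos] for a single sequence,
# [bos] A [eos] [eos] B [eos] for a pair. Token ids 0/2 are illustrative.
def _demo_build_inputs(a, b=None, bos=0, eos=2):
    out = [bos] + a + [eos]
    if b is None:
        return out
    return out + [eos] + b + [eos]

assert _demo_build_inputs([5, 6]) == [0, 5, 6, 2]
assert _demo_build_inputs([5], [7]) == [0, 5, 2, 2, 7, 2]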
| 21
|
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_lowercase : str = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : str , *lowercase_ : Dict , **lowercase_ : List[Any] ):
super().__init__(*lowercase_ , **lowercase_ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : List[Any]=None , lowercase_ : Dict=None ):
lowercase_ : Optional[Any] = {}
lowercase_ : Tuple = {}
if prompt is not None:
lowercase_ : Tuple = prompt
if generate_kwargs is not None:
lowercase_ : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowercase_ : List[Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
lowercase_ : str = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : List[Any] , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : Optional[int] ):
return super().__call__(lowercase_ , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[Any] , lowercase_ : Tuple=None ):
lowercase_ : List[Any] = load_image(lowercase_ )
if prompt is not None:
if not isinstance(lowercase_ , lowercase_ ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowercase_ )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
lowercase_ : List[Any] = self.model.config.model_type
if model_type == "git":
lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework )
lowercase_ : Union[str, Any] = self.tokenizer(text=lowercase_ , add_special_tokens=lowercase_ ).input_ids
lowercase_ : int = [self.tokenizer.cls_token_id] + input_ids
lowercase_ : List[Any] = torch.tensor(lowercase_ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
lowercase_ : Union[str, Any] = self.image_processor(images=lowercase_ , header_text=lowercase_ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework )
lowercase_ : List[str] = self.tokenizer(lowercase_ , return_tensors=self.framework )
model_inputs.update(lowercase_ )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
lowercase_ : List[str] = self.image_processor(images=lowercase_ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowercase_ : str = None
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Dict , lowercase_ : Optional[Any]=None ):
        # The Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch
        # mode, the pipeline would group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , lowercase_ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
lowercase_ : Any = None
if generate_kwargs is None:
lowercase_ : Optional[Any] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowercase_ : Dict = model_inputs.pop(self.model.main_input_name )
lowercase_ : Any = self.model.generate(lowercase_ , **lowercase_ , **lowercase_ )
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[Any] ):
lowercase_ : List[str] = []
for output_ids in model_outputs:
lowercase_ : Union[str, Any] = {
"""generated_text""": self.tokenizer.decode(
lowercase_ , skip_special_tokens=lowercase_ , )
}
records.append(lowercase_ )
return records
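# --- Editor's addition: hedged sketch of the duplicate-parameter guard in
# the parameter sanitizer above: max_new_tokens may arrive either as a direct
# argument or inside generate_kwargs, but not both.
def _merge_generate_kwargs(generate_kwargs=None, max_new_tokens=None):
    merged = dict(generate_kwargs or {})
    if max_new_tokens is not None:
        if "max_new_tokens" in merged:
            raise ValueError("'max_new_tokens' is defined twice, please use only one")
        merged["max_new_tokens"] = max_new_tokens
    return merged

assert _merge_generate_kwargs({"num_beams": 2}, 10) == {"num_beams": 2, "max_new_tokens": 10}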
| 21
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_lowercase : Optional[int] = argparse.ArgumentParser(
description=(
"Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="bert", choices=["bert"])
parser.add_argument("--model_name", default="bert-base-uncased", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
_lowercase : Optional[int] = parser.parse_args()
if args.model_type == "bert":
_lowercase : Optional[int] = BertForMaskedLM.from_pretrained(args.model_name)
_lowercase : Dict = "bert"
else:
raise ValueError("args.model_type should be \"bert\".")
_lowercase : Optional[Any] = model.state_dict()
_lowercase : Union[str, Any] = {}
for w in ["word_embeddings", "position_embeddings"]:
_lowercase : Any = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_lowercase : List[Any] = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_lowercase : Any = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
_lowercase : Any = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_lowercase : str = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_lowercase : Any = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_lowercase : List[str] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_lowercase : Any = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_lowercase : Dict = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_lowercase : str = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_lowercase : Any = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_lowercase : List[str] = state_dict["cls.predictions.decoder.weight"]
_lowercase : Dict = state_dict["cls.predictions.bias"]
if args.vocab_transform:
for w in ["weight", "bias"]:
_lowercase : Optional[int] = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_lowercase : List[Any] = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
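# --- Editor's addition: hedged sketch of the renaming scheme above. The six
# selected teacher layers are copied to consecutive student indices.
_teacher_layers = [0, 2, 4, 7, 9, 11]
_layer_map = {t: s for s, t in enumerate(_teacher_layers)}
assert _layer_map == {0: 0, 2: 1, 4: 2, 7: 3, 9: 4, 11: 5}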
| 21
|
'''simple docstring'''
class __magic_name__ :
def __init__( self : int , lowercase_ : list ):
lowercase_ : Dict = set_counts
lowercase_ : List[Any] = max(lowercase_ )
lowercase_ : str = len(lowercase_ )
lowercase_ : str = [1] * num_sets
lowercase_ : Dict = list(range(lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : List[Any] = self.get_parent(lowercase_ )
lowercase_ : Union[str, Any] = self.get_parent(lowercase_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowercase_ : List[str] = 0
lowercase_ : Optional[int] = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowercase_ : int = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowercase_ : int = 0
lowercase_ : List[Any] = src_parent
lowercase_ : List[Any] = self.set_counts[src_parent]
lowercase_ : Tuple = max(self.max_set , lowercase_ )
return True
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : int ):
if self.parents[disj_set] == disj_set:
return disj_set
lowercase_ : int = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
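# --- Editor's addition: hedged reference sketch with the same semantics as the
# class above: union by rank, per-root size tracking, recursive path compression.
def _demo_disjoint_sets():
    parents, ranks, sizes = list(range(4)), [0] * 4, [1, 2, 3, 4]

    def find(x):
        if parents[x] != x:
            parents[x] = find(parents[x])
        return parents[x]

    def merge(a, b):
        ra, rb = find(a), find(b)
        if ra == rb:
            return False
        if ranks[rb] >= ranks[ra]:
            ra, rb = rb, ra  # attach the lower-ranked root under the higher
        parents[rb] = ra
        sizes[ra] += sizes[rb]
        if ranks[ra] == ranks[rb]:
            ranks[ra] += 1
        return True

    merge(0, 1)
    merge(2, 3)
    merge(0, 3)
    assert sizes[find(0)] == 10  # all four sets merged: 1 + 2 + 3 + 4

_demo_disjoint_sets()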
| 21
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = StableDiffusionSAGPipeline
UpperCamelCase__ = TEXT_TO_IMAGE_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
torch.manual_seed(0 )
lowercase_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase_ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0 )
lowercase_ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase_ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase_ : Dict = CLIPTextModel(lowercase_ )
lowercase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Tuple , lowercase_ : List[str]=0 ):
if str(lowercase_ ).startswith("""mps""" ):
lowercase_ : Tuple = torch.manual_seed(lowercase_ )
else:
lowercase_ : str = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase_ : Any = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : str = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
lowercase_ : Optional[int] = sag_pipe.to(lowercase_ )
sag_pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : str = """."""
lowercase_ : List[Any] = torch.manual_seed(0 )
lowercase_ : int = sag_pipe(
[prompt] , generator=lowercase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
lowercase_ : Dict = output.images
lowercase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ : List[str] = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Tuple = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
lowercase_ : Tuple = sag_pipe.to(lowercase_ )
sag_pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : str = """."""
lowercase_ : Any = torch.manual_seed(0 )
lowercase_ : Optional[int] = sag_pipe(
[prompt] , generator=lowercase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
lowercase_ : str = output.images
lowercase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ : Any = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
lowercase_ : Union[str, Any] = sag_pipe.to(lowercase_ )
sag_pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : Optional[Any] = """."""
lowercase_ : List[str] = torch.manual_seed(0 )
lowercase_ : int = sag_pipe(
[prompt] , width=768 , height=512 , generator=lowercase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
lowercase_ : int = output.images
assert image.shape == (1, 512, 768, 3)
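# --- Editor's addition: hedged sketch of the numeric check pattern used in the
# tests above: compare a 3x3 corner slice of the last channel against a
# reference array within an absolute tolerance.
import numpy as _np

_image = _np.zeros((1, 512, 512, 3))
_slice = _image[0, -3:, -3:, -1]
assert _slice.shape == (3, 3)
assert _np.abs(_slice.flatten() - _np.zeros(9)).max() < 5e-2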
| 21
|
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_lowercase : str = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : str , *lowercase_ : int , **lowercase_ : Any ):
super().__init__(*lowercase_ , **lowercase_ )
requires_backends(self , """decord""" )
self.check_model_type(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : Union[str, Any]=None , lowercase_ : List[Any]=None ):
lowercase_ : Union[str, Any] = {}
if frame_sampling_rate is not None:
lowercase_ : Any = frame_sampling_rate
if num_frames is not None:
lowercase_ : Optional[Any] = num_frames
lowercase_ : Union[str, Any] = {}
if top_k is not None:
lowercase_ : Optional[Any] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : str , lowercase_ : Union[str, List[str]] , **lowercase_ : str ):
return super().__call__(lowercase_ , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=None , lowercase_ : Optional[int]=1 ):
if num_frames is None:
lowercase_ : List[Any] = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
lowercase_ : Union[str, Any] = BytesIO(requests.get(lowercase_ ).content )
lowercase_ : Optional[Any] = VideoReader(lowercase_ )
videoreader.seek(0 )
lowercase_ : Tuple = 0
lowercase_ : List[Any] = num_frames * frame_sampling_rate - 1
lowercase_ : Optional[int] = np.linspace(lowercase_ , lowercase_ , num=lowercase_ , dtype=np.intaa )
lowercase_ : Optional[int] = videoreader.get_batch(lowercase_ ).asnumpy()
lowercase_ : Union[str, Any] = list(lowercase_ )
lowercase_ : Optional[Any] = self.image_processor(lowercase_ , return_tensors=self.framework )
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : str ):
lowercase_ : int = self.model(**lowercase_ )
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[Any] , lowercase_ : Dict=5 ):
if top_k > self.model.config.num_labels:
lowercase_ : List[Any] = self.model.config.num_labels
if self.framework == "pt":
lowercase_ : str = model_outputs.logits.softmax(-1 )[0]
lowercase_ , lowercase_ : Optional[Any] = probs.topk(lowercase_ )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
lowercase_ : Union[str, Any] = scores.tolist()
lowercase_ : Tuple = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
| 21
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[Any] = "▁"
_lowercase : Dict = {"vocab_file": "spiece.model"}
_lowercase : int = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
_lowercase : Optional[int] = {
"google/pegasus-xsum": 512,
}
_lowercase : Dict = logging.get_logger(__name__)
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , lowercase_ : int , lowercase_ : int="<pad>" , lowercase_ : Tuple="</s>" , lowercase_ : Optional[int]="<unk>" , lowercase_ : str="<mask_2>" , lowercase_ : str="<mask_1>" , lowercase_ : str=None , lowercase_ : Optional[int]=103 , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : Optional[int] , ):
lowercase_ : Optional[Any] = offset
if additional_special_tokens is not None:
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowercase_ )}, but is'''
f''' {type(lowercase_ )}''' )
lowercase_ : Optional[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowercase_ ) , self.offset - 1 )
]
if len(set(lowercase_ ) ) != len(lowercase_ ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
lowercase_ : str = additional_special_tokens_extended
else:
lowercase_ : int = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
lowercase_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase_ , unk_token=lowercase_ , mask_token=lowercase_ , pad_token=lowercase_ , mask_token_sent=lowercase_ , offset=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
lowercase_ : Any = mask_token_sent
lowercase_ : Any = vocab_file
lowercase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
# add special tokens to encoder dict
lowercase_ : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
lowercase_ : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def SCREAMING_SNAKE_CASE_ ( self : str ):
return len(self.sp_model ) + self.offset
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : int = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token: str ) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token )
        return sp_id + self.offset
    def _convert_id_to_token( self , index: int ) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset )
            return token
    def convert_tokens_to_string( self , tokens: List[str] ) -> str:
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def num_special_tokens_to_add( self , pair=False ) -> int:
        return 1
    def _special_token_mask( self , seq: List ) -> List[int]:
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self , token_ids_0: List , token_ids_1: Optional[List] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens( self , token_ids_0: List , token_ids_1: Optional[List] = None ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
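# Usage sketch (illustrative, not from the original file): assumes a local
# SentencePiece model at the hypothetical path "spiece.model".
# >>> tok = PegasusTokenizer("spiece.model")
# >>> pieces = tok._tokenize("Hello world")
# >>> ids = [tok._convert_token_to_id(p) for p in pieces]
# SentencePiece ids are shifted up by `offset`, keeping ids 0..offset-1
# reserved for pad/eos/<mask_2>/<mask_1>/<unk_2>..<unk_102>.
# >>> tok.convert_tokens_to_string(pieces)
# 'Hello world'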
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple( x ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model( self , vision_config , text_config ):
        pass
    def prepare_config_and_inputs( self ):
        pass
    def get_pretrained_model_and_inputs( self ):
        pass
    def assert_almost_equals( self , a: np.ndarray , b: np.ndarray , tol: float ):
        diff = np.abs(a - b ).max()
        self.assertLessEqual(diff , tol , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
    def check_model_from_pretrained_configs( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        model = FlaxVisionTextDualEncoderModel(config )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
    def check_vision_text_dual_encoder_from_pretrained( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {"""vision_model""": vision_model, """text_model""": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {"""vision_model""": vision_model, """text_model""": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname )
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1E-3 )
    def check_vision_text_output_attention( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {"""vision_model""": vision_model, """text_model""": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def check_pt_flax_equivalence( self , pt_model , fx_model , inputs_dict ):
        pt_model.to(torch_device )
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs ).to_tuple()
        fx_outputs = fx_model(**inputs_dict ).to_tuple()
        self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , """Output lengths differ between Flax and PyTorch""" )
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(fx_output , pt_output.numpy() , 4E-2 )
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname )
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname , from_pt=True )
        fx_outputs_loaded = fx_model_loaded(**inputs_dict ).to_tuple()
        self.assertEqual(len(fx_outputs_loaded ) , len(pt_outputs ) , """Output lengths differ between Flax and PyTorch""" )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(fx_output_loaded , pt_output.numpy() , 4E-2 )
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname )
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname , from_flax=True )
        pt_model_loaded.to(torch_device )
        pt_model_loaded.eval()
        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs ).to_tuple()
        self.assertEqual(len(fx_outputs ) , len(pt_outputs_loaded ) , """Output lengths differ between Flax and PyTorch""" )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
            self.assert_almost_equals(fx_output , pt_output_loaded.numpy() , 4E-2 )
    def check_equivalence_pt_to_flax( self , vision_config , text_config , inputs_dict ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        pt_model = VisionTextDualEncoderModel(config )
        fx_model = FlaxVisionTextDualEncoderModel(config )
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model )
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict )
    def check_equivalence_flax_to_pt( self , vision_config , text_config , inputs_dict ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        pt_model = VisionTextDualEncoderModel(config )
        fx_model = FlaxVisionTextDualEncoderModel(config )
        pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params )
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict )
    def test_model_from_pretrained_configs( self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict )
    def test_vision_text_dual_encoder_from_pretrained( self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict )
    def test_save_load( self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict )
    def test_vision_text_output_attention( self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict )
    @is_pt_flax_cross_test
    def test_pt_flax_equivalence( self ):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("""vision_config""" )
        text_config = config_inputs_dict.pop("""text_config""" )
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config , text_config , inputs_dict )
        self.check_equivalence_flax_to_pt(vision_config , text_config , inputs_dict )
    @slow
    def test_real_model_save_load_from_pretrained( self ):
        model_2 , inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs )
        out_2 = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname )
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
            after_outputs = model_1(**inputs )
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2 ) )
            self.assertLessEqual(max_diff , 1E-5 )
@require_flax
class FlaxViTBertModelTest( VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs( self ):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        vision_model = FlaxViTModel(vision_config )
        text_model = FlaxBertModel(text_config )
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        vit_model_tester = FlaxViTModelTester(self )
        bert_model_tester = FlaxBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class FlaxCLIPVisionBertModelTest( VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs( self ):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        vision_model = FlaxCLIPVisionModel(vision_config )
        text_model = FlaxBertModel(text_config )
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        clip_model_tester = FlaxCLIPVisionModelTester(self )
        bert_model_tester = FlaxBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest( unittest.TestCase):
@slow
    def test_inference( self ):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        inputs = processor(
            text=["""una foto di un gatto""", """una foto di un cane"""] , images=image , padding=True , return_tensors="""np""" )
        outputs = model(**inputs )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.2284727, 0.3104122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 ) )
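# Sketch of the attention-shape bookkeeping checked above (pure Python, values
# illustrative): a ViT seeing 224x224 images with 16x16 patches attends over
# (224 // 16) * (224 // 16) = 196 patches plus one [CLS] token.
if __name__ == "__main__":
    image_size, patch_size = to_atuple(224), to_atuple(16)
    num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
    seq_len = num_patches + 1  # 197; each attention map is (heads, 197, 197)
    print(seq_len)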
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial( n: int ) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n ) )
def solution( ) -> int:
    limit = 7 * factorial(9 ) + 1
    return sum(i for i in range(3 , limit ) if sum_of_digit_factorial(i ) == i )
if __name__ == "__main__":
print(f"""{solution() = }""")
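# Sanity check (145 = 1! + 4! + 5! is the classic curious number):
# >>> sum_of_digit_factorial(145)
# 145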
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester( unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , ):
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict( self ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest( ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = ImageGPTImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """clusters""" ) )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """do_normalize""" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
    def test_image_processor_to_json_string( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        obj = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , obj[key] ) )
            else:
                self.assertEqual(obj[key] , value )
    def test_image_processor_to_json_file( self ):
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """image_processor.json""" )
            image_processor_first.to_json_file(json_file_path )
            image_processor_second = self.image_processing_class.from_json_file(json_file_path ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
    def test_image_processor_from_and_save_pretrained( self ):
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname )
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
    @unittest.skip("""ImageGPT requires clusters at initialization""" )
    def test_init_without_params( self ):
        pass
def prepare_images( ):
    dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
    image_1 = Image.open(dataset[4]["""file"""] )
    image_2 = Image.open(dataset[5]["""file"""] )
    images = [image_1, image_2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest( unittest.TestCase):
@slow
    def test_image( self ):
        image_processing = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0] , return_tensors="""pt""" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 1024) )
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )
        # test batched
        encoding = image_processing(images , return_tensors="""pt""" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 1024) )
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
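# ImageGPT turns pixels into token ids by nearest-color-cluster assignment.
# A minimal numpy sketch of that step (the two clusters mirror the toy values
# in prepare_image_processor_dict; real checkpoints use 512 clusters):
if __name__ == "__main__":
    clusters = np.asarray(
        [
            [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
            [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
        ]
    )
    pixels = np.random.uniform(-1, 1, size=(1024, 3))  # a flattened 32x32 RGB image
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    input_ids = distances.argmin(-1)  # one cluster id per pixel, like encoding.input_ids
    print(input_ids.shape)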
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig( _UpperCAmelCase):
    model_type = '''levit'''
    def __init__( self , image_size=224 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig( _UpperCAmelCase):
    torch_onnx_minimum_version = version.parse('''1.11''')
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1E-4
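# Minimal usage sketch (assumes the class names above; the defaults follow the
# levit-128-style shapes set in __init__):
if __name__ == "__main__":
    config = LevitConfig()
    # each "Subsample" entry in down_ops shrinks the token grid between stages
    print(config.hidden_sizes, config.down_ops[0][0])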
'''simple docstring'''
def solution( ) -> int:
    constant = []
    i = 1
    while len(constant ) < 1e6:
        constant.append(str(i ) )
        i += 1
    constant = """""".join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution())
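# The product above is d(1) * d(10) * d(100) * ... * d(1_000_000), where d(n)
# is the n-th digit of Champernowne's constant 0.123456789101112...
# Spot check (hand-computed): the 10th digit is the '1' of "10".
# >>> "".join(str(i) for i in range(1, 100))[9]
# '1'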
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def sieve( ) -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution( limit: float = 1e10 ) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes )
        n += 2
if __name__ == "__main__":
print(solution())
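# The generator is an incremental sieve: each composite is crossed off lazily
# through factor_map. Quick sanity check (assumes sieve() as defined above):
# >>> from itertools import islice
# >>> list(islice(sieve(), 5))
# [2, 3, 5, 7, 11]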
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class __magic_name__ ( _UpperCAmelCase):
    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size: int = 1 , num_inference_steps: int = 100 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , audio_length_in_s: Optional[float] = None , return_dict: bool = True , ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
                f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
                f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
                """ process.""" )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio , t ).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output , t , audio ).prev_sample
        audio = audio.clamp(-1 , 1 ).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    predictions = model.predict(x_test)
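    # Follow-up sketch (not in the original script): report the error of the
    # predictions in the scaled space; `mse` is an illustrative name.
    mse = float(((predictions - y_test) ** 2).mean())
    print(f"test MSE (scaled space): {mse:.6f}")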
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file( filename: str , start_prompt: str , end_prompt: str ):
    with open(filename , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_lowercase : int = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split( identifier: str ) -> list:
    matches = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , identifier )
    return [m.group(0 ) for m in matches]
def _center_text( text: str , width: int ) -> str:
    text_length = 2 if text == """✅""" or text == """❌""" else len(text )
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules( ) -> str:
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool )
    fast_tokenizers = collections.defaultdict(bool )
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers objects (once).
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if attr_name.endswith("""Tokenizer""" ):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("""TokenizerFast""" ):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = """""".join(camel_case_split(attr_name )[:-1] )
    # Let's build that table!
    model_names = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    columns = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c ) + 2 for c in columns]
    widths[0] = max([len(name ) for name in model_names] ) + 2
    # Build the table per se
    table = """|""" + """|""".join([_center_text(c , w ) for c, w in zip(columns , widths )] ) + """|\n"""
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n"
    check = {True: """✅""", False: """❌"""}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l , w ) for l, w in zip(line , widths )] ) + "|\n"
    return table
def check_model_table( overwrite: bool = False ):
    current_table , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
        else:
            raise ValueError(
                """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
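# Illustrative behaviour of the helpers above (hand-computed, not from a run):
# >>> camel_case_split("FlaxBertModel")
# ['Flax', 'Bert', 'Model']
# >>> _center_text("✅", 8)  # the emoji is counted as width 2
# '   ✅   '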
'''simple docstring'''
class DisjointSet:
    def __init__( self , set_counts: list ):
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )
    def merge( self , src: int , dst: int ) -> bool:
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True
    def get_parent( self , disj_set: int ) -> int:
        if self.parents[disj_set] == disj_set:
            return disj_set
        # Path compression
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
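# Usage sketch (assumes the class above; merge follows union by rank with
# path compression):
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 1])      # three singleton sets of size 1
    ds.merge(0, 1)                   # sizes become {2, 1}
    ds.merge(1, 2)                   # one set of size 3 remains
    assert ds.max_set == 3
    assert ds.get_parent(0) == ds.get_parent(2)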
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __magic_name__ ( ctypes.Structure):
# _fields is a specific attr expected by ctypes
UpperCamelCase__ = [('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)]
def hide_cursor( ) -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write("""\033[?25l""" )
        sys.stdout.flush()
def show_cursor( ) -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write("""\033[?25h""" )
        sys.stdout.flush()
@contextmanager
def hidden_cursor( ):
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
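# Usage sketch (assumes the helpers above):
if __name__ == "__main__":
    import time
    with hidden_cursor():
        time.sleep(1)  # the cursor is hidden inside the block and restored after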
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class RobertaConfig( _UpperCAmelCase):
    model_type = '''roberta'''
    def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig( _UpperCAmelCase):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class __magic_name__ ( _UpperCAmelCase):
    deprecated_args = [
        '''no_inference''',
        '''no_cuda''',
        '''no_tpu''',
        '''no_speed''',
        '''no_memory''',
        '''no_env_print''',
        '''no_multi_process''',
    ]
    def __init__( self , **kwargs ):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg ) )
                logger.warning(
                    f'''{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'''
                    f''' {positive_arg}={kwargs[positive_arg]}''' )
        self.torchscript = kwargs.pop("""torchscript""" , self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
        self.fp16_opt_level = kwargs.pop("""fp16_opt_level""" , self.fp16_opt_level )
        super().__init__(**kwargs )
    torchscript : bool = field(default=False, metadata={'''help''': '''Trace the models using torchscript'''})
    torch_xla_tpu_print_metrics : bool = field(default=False, metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''})
    fp16_opt_level : str = field(
        default='''O1''', metadata={
            '''help''': (
                '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '''
                '''See details at https://nvidia.github.io/apex/amp.html'''
            )
        }, )
    @cached_property
    def _setup_devices( self ) -> Tuple["torch.device", int]:
        requires_backends(self , ["""torch"""] )
        logger.info("""PyTorch: setting up devices""" )
        if not self.cuda:
            device = torch.device("""cpu""" )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu( self ) -> bool:
        return is_torch_tpu_available() and self.tpu
    @property
    def device_idx( self ) -> int:
        requires_backends(self , ["""torch"""] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def device( self ) -> "torch.device":
        requires_backends(self , ["""torch"""] )
        return self._setup_devices[0]
    @property
    def n_gpu( self ) -> int:
        requires_backends(self , ["""torch"""] )
        return self._setup_devices[1]
    @property
    def is_gpu( self ) -> bool:
        return self.n_gpu > 0