code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def lowerCAmelCase__ ( a__ , a__ ) ->np.array:
'''simple docstring'''
_UpperCamelCase = f'{sampling_rate}'
_UpperCamelCase = "1"
_UpperCamelCase = "f32le"
_UpperCamelCase = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(a__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
_UpperCamelCase = ffmpeg_process.communicate(a__ )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
_UpperCamelCase = output_stream[0]
_UpperCamelCase = np.frombuffer(a__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
def lowerCAmelCase__ ( a__ , a__ , a__ = "f32le" , ) ->int:
'''simple docstring'''
_UpperCamelCase = f'{sampling_rate}'
_UpperCamelCase = "1"
if format_for_conversion == "s16le":
_UpperCamelCase = 2
elif format_for_conversion == "f32le":
_UpperCamelCase = 4
else:
raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
_UpperCamelCase = platform.system()
if system == "Linux":
_UpperCamelCase = "alsa"
_UpperCamelCase = "default"
elif system == "Darwin":
_UpperCamelCase = "avfoundation"
_UpperCamelCase = ":0"
elif system == "Windows":
_UpperCamelCase = "dshow"
_UpperCamelCase = "default"
_UpperCamelCase = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
_UpperCamelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_UpperCamelCase = _ffmpeg_stream(a__ , a__ )
for item in iterator:
yield item
def lowerCAmelCase__ ( a__ , a__ , a__ = None , a__ = None , a__ = "f32le" , ) ->Union[str, Any]:
'''simple docstring'''
if stream_chunk_s is not None:
_UpperCamelCase = stream_chunk_s
else:
_UpperCamelCase = chunk_length_s
_UpperCamelCase = ffmpeg_microphone(a__ , a__ , format_for_conversion=a__ )
if format_for_conversion == "s16le":
_UpperCamelCase = np.intaa
_UpperCamelCase = 2
elif format_for_conversion == "f32le":
_UpperCamelCase = np.floataa
_UpperCamelCase = 4
else:
raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
if stride_length_s is None:
_UpperCamelCase = chunk_length_s / 6
_UpperCamelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(a__ , (int, float) ):
_UpperCamelCase = [stride_length_s, stride_length_s]
_UpperCamelCase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_UpperCamelCase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_UpperCamelCase = datetime.datetime.now()
_UpperCamelCase = datetime.timedelta(seconds=a__ )
for item in chunk_bytes_iter(a__ , a__ , stride=(stride_left, stride_right) , stream=a__ ):
# Put everything back in numpy scale
_UpperCamelCase = np.frombuffer(item["raw"] , dtype=a__ )
_UpperCamelCase = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
_UpperCamelCase = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ = False ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = B""
_UpperCamelCase , _UpperCamelCase = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' )
_UpperCamelCase = 0
for raw in iterator:
acc += raw
if stream and len(a__ ) < chunk_len:
_UpperCamelCase = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(a__ ) >= chunk_len:
# We are flushing the accumulator
_UpperCamelCase = (_stride_left, stride_right)
_UpperCamelCase = {"raw": acc[:chunk_len], "stride": stride}
if stream:
_UpperCamelCase = False
yield item
_UpperCamelCase = stride_left
_UpperCamelCase = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(a__ ) > stride_left:
_UpperCamelCase = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
_UpperCamelCase = False
yield item
def lowerCAmelCase__ ( a__ , a__ ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = 2**24 # 16Mo
try:
with subprocess.Popen(a__ , stdout=subprocess.PIPE , bufsize=a__ ) as ffmpeg_process:
while True:
_UpperCamelCase = ffmpeg_process.stdout.read(a__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 82 | import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase__ = 5_0000
lowerCamelCase__ = 5000
lowerCamelCase__,lowerCamelCase__ = os.path.split(__file__)
lowerCamelCase__ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def lowerCAmelCase__ ( a__ , a__ ) ->int:
'''simple docstring'''
for i in range(a__ ):
_UpperCamelCase = dataset[i]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->int:
'''simple docstring'''
for i in range(0 , len(a__ ) , a__ ):
_UpperCamelCase = dataset[i : i + batch_size]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Union[str, Any]:
'''simple docstring'''
with dataset.formatted_as(type=a__ ):
for i in range(a__ ):
_UpperCamelCase = dataset[i]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->Dict:
'''simple docstring'''
with dataset.formatted_as(type=a__ ):
for i in range(0 , a__ , a__ ):
_UpperCamelCase = dataset[i : i + batch_size]
def lowerCAmelCase__ ( ) ->Dict:
'''simple docstring'''
_UpperCamelCase = {"num examples": SPEED_TEST_N_EXAMPLES}
_UpperCamelCase = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
_UpperCamelCase = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
_UpperCamelCase = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
_UpperCamelCase = generate_example_dataset(
os.path.join(a__ , "dataset.arrow" ) , a__ , num_examples=a__ , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(a__ ) )
_UpperCamelCase = func(a__ , **a__ )
print("shuffling dataset" )
_UpperCamelCase = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(a__ ) )
_UpperCamelCase = func(
a__ , **a__ )
with open(a__ , "wb" ) as f:
f.write(json.dumps(a__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 82 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : List[Any] , lowercase_ : int , lowercase_ : int) -> Any:
"""simple docstring"""
_UpperCamelCase = jnp.ones((batch_size, length)) / length
return scores
def __UpperCAmelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = None
_UpperCamelCase = 20
_UpperCamelCase = self._get_uniform_logits(batch_size=2 , length=lowercase_)
# tweak scores to not be uniform anymore
_UpperCamelCase = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch
_UpperCamelCase = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch
# compute softmax
_UpperCamelCase = jax.nn.softmax(lowercase_ , axis=-1)
_UpperCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5)
_UpperCamelCase = FlaxTemperatureLogitsWarper(temperature=1.3)
_UpperCamelCase = jax.nn.softmax(temp_dist_warper_sharper(lowercase_ , scores.copy() , cur_len=lowercase_) , axis=-1)
_UpperCamelCase = jax.nn.softmax(temp_dist_warper_smoother(lowercase_ , scores.copy() , cur_len=lowercase_) , axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3))
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min())
def __UpperCAmelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = None
_UpperCamelCase = 10
_UpperCamelCase = 2
# create ramp distribution
_UpperCamelCase = np.broadcast_to(np.arange(lowercase_)[None, :] , (batch_size, vocab_size)).copy()
_UpperCamelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCamelCase = FlaxTopKLogitsWarper(3)
_UpperCamelCase = top_k_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
# check special case
_UpperCamelCase = 5
_UpperCamelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
_UpperCamelCase = np.broadcast_to(np.arange(lowercase_)[None, :] , (batch_size, length)).copy()
_UpperCamelCase = top_k_warp_safety_check(lowercase_ , lowercase_ , cur_len=lowercase_)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
def __UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCamelCase = None
_UpperCamelCase = 10
_UpperCamelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCamelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
_UpperCamelCase = FlaxTopPLogitsWarper(0.8)
_UpperCamelCase = np.exp(top_p_warp(lowercase_ , lowercase_ , cur_len=lowercase_))
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCamelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3))
# check edge cases with negative and extreme logits
_UpperCamelCase = np.broadcast_to(np.arange(lowercase_)[None, :] , (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCamelCase = ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
_UpperCamelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
_UpperCamelCase = top_p_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
def __UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCamelCase = 20
_UpperCamelCase = 4
_UpperCamelCase = 0
_UpperCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase_)
# check that min length is applied at length 5
_UpperCamelCase = ids_tensor((batch_size, 20) , vocab_size=20)
_UpperCamelCase = 5
_UpperCamelCase = self._get_uniform_logits(lowercase_ , lowercase_)
_UpperCamelCase = min_dist_processor(lowercase_ , lowercase_ , cur_len=lowercase_)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf")])
# check that min length is not applied anymore at length 15
_UpperCamelCase = self._get_uniform_logits(lowercase_ , lowercase_)
_UpperCamelCase = 15
_UpperCamelCase = min_dist_processor(lowercase_ , lowercase_ , cur_len=lowercase_)
self.assertFalse(jnp.isinf(lowercase_).any())
def __UpperCAmelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = 20
_UpperCamelCase = 4
_UpperCamelCase = 0
_UpperCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase_)
# check that all scores are -inf except the bos_token_id score
_UpperCamelCase = ids_tensor((batch_size, 1) , vocab_size=20)
_UpperCamelCase = 1
_UpperCamelCase = self._get_uniform_logits(lowercase_ , lowercase_)
_UpperCamelCase = logits_processor(lowercase_ , lowercase_ , cur_len=lowercase_)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCamelCase = 3
_UpperCamelCase = self._get_uniform_logits(lowercase_ , lowercase_)
_UpperCamelCase = logits_processor(lowercase_ , lowercase_ , cur_len=lowercase_)
self.assertFalse(jnp.isinf(lowercase_).any())
def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = 20
_UpperCamelCase = 4
_UpperCamelCase = 0
_UpperCamelCase = 5
_UpperCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase_ , eos_token_id=lowercase_)
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCamelCase = ids_tensor((batch_size, 4) , vocab_size=20)
_UpperCamelCase = 4
_UpperCamelCase = self._get_uniform_logits(lowercase_ , lowercase_)
_UpperCamelCase = logits_processor(lowercase_ , lowercase_ , cur_len=lowercase_)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCamelCase = 3
_UpperCamelCase = self._get_uniform_logits(lowercase_ , lowercase_)
_UpperCamelCase = logits_processor(lowercase_ , lowercase_ , cur_len=lowercase_)
self.assertFalse(jnp.isinf(lowercase_).any())
def __UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = 4
_UpperCamelCase = 10
_UpperCamelCase = 15
_UpperCamelCase = 2
_UpperCamelCase = 1
_UpperCamelCase = 15
# dummy input_ids and scores
_UpperCamelCase = ids_tensor((batch_size, sequence_length) , lowercase_)
_UpperCamelCase = input_ids.copy()
_UpperCamelCase = self._get_uniform_logits(lowercase_ , lowercase_)
_UpperCamelCase = scores.copy()
# instantiate all dist processors
_UpperCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5)
_UpperCamelCase = FlaxTopKLogitsWarper(3)
_UpperCamelCase = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
_UpperCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase_)
_UpperCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase_)
_UpperCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase_ , eos_token_id=lowercase_)
_UpperCamelCase = 10
# no processor list
_UpperCamelCase = temp_dist_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
_UpperCamelCase = top_k_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
_UpperCamelCase = top_p_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
_UpperCamelCase = min_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
_UpperCamelCase = bos_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
_UpperCamelCase = eos_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
# with processor list
_UpperCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
_UpperCamelCase = processor(lowercase_ , lowercase_ , cur_len=lowercase_)
# scores should be equal
self.assertTrue(jnp.allclose(lowercase_ , lowercase_ , atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = 4
_UpperCamelCase = 10
_UpperCamelCase = 15
_UpperCamelCase = 2
_UpperCamelCase = 1
_UpperCamelCase = 15
# dummy input_ids and scores
_UpperCamelCase = ids_tensor((batch_size, sequence_length) , lowercase_)
_UpperCamelCase = input_ids.copy()
_UpperCamelCase = self._get_uniform_logits(lowercase_ , lowercase_)
_UpperCamelCase = scores.copy()
# instantiate all dist processors
_UpperCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5)
_UpperCamelCase = FlaxTopKLogitsWarper(3)
_UpperCamelCase = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
_UpperCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase_)
_UpperCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase_)
_UpperCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase_ , eos_token_id=lowercase_)
_UpperCamelCase = 10
# no processor list
def run_no_processor_list(lowercase_ : Any , lowercase_ : Any , lowercase_ : int):
_UpperCamelCase = temp_dist_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
_UpperCamelCase = top_k_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
_UpperCamelCase = top_p_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
_UpperCamelCase = min_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
_UpperCamelCase = bos_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
_UpperCamelCase = eos_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
return scores
# with processor list
def run_processor_list(lowercase_ : Any , lowercase_ : int , lowercase_ : Tuple):
_UpperCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
_UpperCamelCase = processor(lowercase_ , lowercase_ , cur_len=lowercase_)
return scores
_UpperCamelCase = jax.jit(lowercase_)
_UpperCamelCase = jax.jit(lowercase_)
_UpperCamelCase = jitted_run_no_processor_list(lowercase_ , lowercase_ , lowercase_)
_UpperCamelCase = jitted_run_processor_list(lowercase_ , lowercase_ , lowercase_)
# scores should be equal
self.assertTrue(jnp.allclose(lowercase_ , lowercase_ , atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
| 82 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.dummy_uncond_unet
_UpperCamelCase = KarrasVeScheduler()
_UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy").images
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy" , return_dict=lowercase_)[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
_UpperCamelCase = "google/ncsnpp-celebahq-256"
_UpperCamelCase = UNetaDModel.from_pretrained(lowercase_)
_UpperCamelCase = KarrasVeScheduler()
_UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=20 , generator=lowercase_ , output_type="numpy").images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCamelCase = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82 | 1 |
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->Dict:
'''simple docstring'''
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
_UpperCamelCase = mf_knapsack(i - 1 , a__ , a__ , a__ )
else:
_UpperCamelCase = max(
mf_knapsack(i - 1 , a__ , a__ , a__ ) , mf_knapsack(i - 1 , a__ , a__ , j - wt[i - 1] ) + val[i - 1] , )
_UpperCamelCase = val
return f[i][j]
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
_UpperCamelCase = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
_UpperCamelCase = dp[i - 1][w_]
return dp[n][w_], dp
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Dict:
'''simple docstring'''
if not (isinstance(a__ , (list, tuple) ) and isinstance(a__ , (list, tuple) )):
raise ValueError(
"Both the weights and values vectors must be either lists or tuples" )
_UpperCamelCase = len(a__ )
if num_items != len(a__ ):
_UpperCamelCase = (
"The number of weights must be the same as the number of values.\n"
f'But got {num_items} weights and {len(a__ )} values'
)
raise ValueError(a__ )
for i in range(a__ ):
if not isinstance(wt[i] , a__ ):
_UpperCamelCase = (
"All weights must be integers but got weight of "
f'type {type(wt[i] )} at index {i}'
)
raise TypeError(a__ )
_UpperCamelCase , _UpperCamelCase = knapsack(a__ , a__ , a__ , a__ )
_UpperCamelCase = set()
_construct_solution(a__ , a__ , a__ , a__ , a__ )
return optimal_val, example_optional_set
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ , a__ ) ->Dict:
'''simple docstring'''
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(a__ , a__ , i - 1 , a__ , a__ )
else:
optimal_set.add(a__ )
_construct_solution(a__ , a__ , i - 1 , j - wt[i - 1] , a__ )
if __name__ == "__main__":
lowerCamelCase__ = [3, 2, 4, 4]
lowerCamelCase__ = [4, 3, 2, 3]
lowerCamelCase__ = 4
lowerCamelCase__ = 6
lowerCamelCase__ = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
lowerCamelCase__,lowerCamelCase__ = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
lowerCamelCase__,lowerCamelCase__ = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('''optimal_value = ''', optimal_solution)
print('''An optimal subset corresponding to the optimal value''', optimal_subset)
| 82 | import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__A = MODEL_FOR_MASKED_LM_MAPPING
__A = TF_MODEL_FOR_MASKED_LM_MAPPING
def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 25506, "token_str": " accuser"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-0_5,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-0_5,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
] , )
_UpperCamelCase = unmasker("My name is <mask> <mask>" , top_k=2)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
# convert model to fp16
pipe.model.half()
_UpperCamelCase = pipe("Paris is the [MASK] of France.")
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(lowercase_ , lowercase_)
@slow
@require_torch
def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
self.run_large_test(lowercase_)
@slow
@require_tf
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
self.run_large_test(lowercase_)
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int) -> Any:
"""simple docstring"""
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
@require_tf
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int]) -> int:
"""simple docstring"""
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> str:
"""simple docstring"""
_UpperCamelCase = fill_masker.tokenizer
_UpperCamelCase = fill_masker.model
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}'])
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
with self.assertRaises(lowercase_):
fill_masker([None])
# No mask_token is not supported
with self.assertRaises(lowercase_):
fill_masker("This is")
self.run_test_top_k(lowercase_ , lowercase_)
self.run_test_targets(lowercase_ , lowercase_)
self.run_test_top_k_targets(lowercase_ , lowercase_)
self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_)
self.fill_mask_with_multiple_masks(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : int , lowercase_ : Dict , lowercase_ : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = sorted(vocab.keys())[:2]
# Pipeline argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , targets=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Call argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Score equivalence
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCamelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_) == set(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
# Raises with invalid
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[])
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""])
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets="")
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , top_k=2)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
# top_k=2, ntargets=3
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=lowercase_)
# If we use the most probably targets, and filter differently, we should still
# have the same results
_UpperCamelCase = [el["token_str"] for el in sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_).issubset(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=lowercase_)
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCamelCase = fill_masker(f'My name is {tokenizer.mask_token}' , targets=lowercase_ , top_k=10)
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(lowercase_) , 3)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Any) -> Dict:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
| 82 | 1 |
from __future__ import annotations
from typing import TypedDict
class _UpperCAmelCase ( lowerCAmelCase ):
    """Typed mapping describing the result of a Burrows-Wheeler transform.

    NOTE(review): the obfuscation collapsed this class's two annotated fields
    to `__A = 42` twice (the second shadows the first). Judging by the keys
    built at the return of the transform function below, this was originally a
    `TypedDict` with fields `bwt_string: str` and `idx_original_string: int`
    named `BWTTransformDict` — the name the transform's return annotation
    still references. Confirm against the upstream source before relying on it.
    """
    __A = 42
    __A = 42
def lowerCAmelCase__ ( a__ ) ->list[str]:
    """Return every cyclic rotation of the string *a__*.

    Fix (review): the original guard was `isinstance(a__, a__)` — passing the
    value itself as the type makes `isinstance` raise TypeError for every
    input. Restored to a check against `str`.

    :param a__: the string to rotate.
    :return: a list of len(a__) rotations, starting with the string itself.
    :raises TypeError: if *a__* is not a str.
    """
    if not isinstance(a__ , str ):
        raise TypeError("The parameter s type must be str." )

    # Rotation i moves the first i characters to the end.
    return [a__[i:] + a__[:i] for i in range(len(a__ ) )]
def lowerCAmelCase__ ( a__ ) ->BWTTransformDict:
    """Compute the Burrows-Wheeler transform of *a__*.

    Fixes (review): the original guard was `isinstance(a__, a__)`, which raises
    TypeError for every input (restored to `str`), and it called
    `all_rotations`, a name never defined in this module (the rotation helper
    above was renamed by the obfuscation) — the rotation list is built inline
    instead.

    :param a__: the string to transform (must be non-empty).
    :return: dict with "bwt_string" (last column of the sorted rotation
        matrix) and "idx_original_string" (row index of the original string).
    :raises TypeError: if *a__* is not a str.
    :raises ValueError: if *a__* is empty.
    """
    if not isinstance(a__ , str ):
        raise TypeError("The parameter s type must be str." )
    if not a__:
        raise ValueError("The parameter s must not be empty." )

    rotations = [a__[i:] + a__[:i] for i in range(len(a__ ) )]
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(a__ ),
    }
    return response
def lowerCAmelCase__ ( bwt_string , idx_original_string ) ->str:
    """Invert a Burrows-Wheeler transform.

    Fixes (review): the original signature declared *both* parameters as
    `a__` — a duplicate-argument SyntaxError; the names are restored from the
    error messages the body already uses. The `isinstance(x, x)` guard is
    restored to a check against `str`.

    :param bwt_string: the transformed string (last column of the rotation matrix).
    :param idx_original_string: row index of the original string; anything
        castable to int is accepted.
    :return: the original, untransformed string.
    :raises TypeError: if bwt_string is not a str, or the index is not castable to int.
    :raises ValueError: if bwt_string is empty or the index is out of range.
    """
    if not isinstance(bwt_string , str ):
        raise TypeError("The parameter bwt_string type must be str." )
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty." )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int." )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0." )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)." )

    # Rebuild the sorted rotation matrix column by column: prepend the BWT
    # column and re-sort, len(bwt_string) times.
    ordered_rotations = [""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    # Interactive demo: read a string, show its BWT, then invert it.
    # NOTE(review): as written this demo cannot run — `entry_msg`, `s`,
    # `result`, `bwt_transform`, `reverse_bwt` and `original_string` are never
    # defined under those names in this module (every assignment target was
    # collapsed to `lowerCamelCase__` and the functions above were all renamed
    # `lowerCAmelCase__` by the obfuscation). Confirm the intended bindings
    # against the upstream source before running.
    lowerCamelCase__ = '''Provide a string that I will generate its BWT transform: '''
    lowerCamelCase__ = input(entry_msg).strip()
    lowerCamelCase__ = bwt_transform(s)
    print(
        F"Burrows Wheeler transform for string '{s}' results "
        F"in '{result['bwt_string']}'"
    )
    lowerCamelCase__ = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
    print(
        F"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        F"we get original string '{original_string}'"
    )
| 82 | lowerCamelCase__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def lowerCAmelCase__ ( a__ ) ->int:
    """Evaluate a fully-parenthesised infix expression with Dijkstra's
    two-stack algorithm.

    Fixes (review): the original pushed the *entire* expression (`a__`) onto
    both stacks instead of the current character `i`, and applied the operator
    to two undefined operands (`operators[opr](a__, a__)`); restored to push
    `i` and apply `(num2, num1)`.

    :param a__: expression such as "(5 + ((4 * 2) * (2 + 3)))" — single-digit
        operands, every operation parenthesised.
    :return: the numeric result (int, or float when "/" is involved).
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in a__:
        if i.isdigit():
            # RULE 1: operands go on the operand stack
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2: operators go on the operator stack
            operator_stack.push(i )
        elif i == ")":
            # RULE 4: on ")", pop one operator and two operands, apply, push result
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2 , num1 )
            operand_stack.push(total )
    # RULE 5: the single remaining operand is the value of the expression
    return operand_stack.peek()
if __name__ == "__main__":
    # Demo: evaluate a fully-parenthesised infix expression.
    # NOTE(review): as written this raises NameError — `equation` was collapsed
    # to `lowerCamelCase__` and the evaluator above was renamed
    # `lowerCAmelCase__`, so neither `equation` nor
    # `dijkstras_two_stack_algorithm` exists under these names.
    lowerCamelCase__ = '''(5 + ((4 * 2) * (2 + 3)))'''
    # answer = 45
    print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 82 | 1 |
from bisect import bisect
from itertools import accumulate
def lowerCAmelCase__ ( vl , wt , w , n ):
    """Greedy solution to the fractional knapsack problem.

    Fixes (review): the original declared all four parameters as `a__`
    (duplicate argument — a SyntaxError; names restored from the body's own
    references), the sort key lambda declared `a__` but read `x`, and
    `reverse=` received an undefined name instead of `True`.

    :param vl: item values.
    :param wt: item weights (parallel to *vl*).
    :param w: knapsack capacity.
    :param n: number of items considered.
    :return: maximum achievable value; 0 when even the best item doesn't fit
        fractionally (capacity below the first prefix weight).
    """
    # Sort items by value density, best first.
    r = sorted(zip(vl , wt ) , key=lambda x : x[0] / x[1] , reverse=True )
    vl , wt = [i[0] for i in r], [i[1] for i in r]
    # Prefix weights of the density-sorted items.
    acc = list(accumulate(wt ) )
    # k = number of whole items that fit.
    k = bisect(acc , w )
    return (
        0
        if k == 0
        else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k] )
    )
if __name__ == "__main__":
    # Run any doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 82 | import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _UpperCAmelCase ( lowerCAmelCase ):
    """Configuration for the BertAbs extractive/abstractive summarization model:
    vocabulary size, maximum positions, and encoder/decoder dimensions.

    Fixes (review): `__init__` declared all twelve parameters as `lowercase_`
    (duplicate argument — a SyntaxError) and assigned them to a throwaway
    local; names and `self.` attribute targets are restored from the
    right-hand sides the body already referenced. The registration attribute
    is restored to `model_type`, the name the PretrainedConfig contract reads.
    """

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 82 | 1 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( enum.Enum ):
'''simple docstring'''
__A = 0
__A = 1
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Pipeline for text-to-text generation with seq2seq models.

    Fixes (review): several method signatures declared the same parameter name
    (`lowercase_`) multiple times — a SyntaxError — and dict-key assignment
    targets were destroyed by the obfuscation. Method names are restored to
    the Pipeline contract (`_sanitize_parameters`, `preprocess`, `_forward`,
    `postprocess`, ...), which the subclasses below already call via
    `super()`, and the class attribute is restored to `return_name`, which
    `postprocess` reads as `self.return_name`.
    """

    # Prefix for output keys: "generated_text" / "generated_token_ids".
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        """Split user kwargs into preprocess / forward / postprocess params."""
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Hook for subclasses to validate input/output lengths; base accepts everything."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        """Prefix and tokenize a string or list-of-strings input."""
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`')
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # Flatten single-candidate results for list-of-strings inputs.
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        # Reshape to (batch, num_return_sequences, seq_len).
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f'{self.return_name}_token_ids': output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f'{self.return_name}_text': self.tokenizer.decode(
                        output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
                }
            records.append(record)
        return records
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Summarization pipeline: text2text generation with summarization-specific
    length warnings.

    Fixes (review): `check_inputs` declared all three parameters as
    `lowercase_` (duplicate argument — a SyntaxError); names restored from the
    f-strings the body already uses. The class attribute is restored to
    `return_name`, which the parent's `postprocess` reads.
    """

    # Output keys become "summary_text" / "summary_token_ids".
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """Warn (don't fail) on length settings that are unusual for summarization."""
        if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be inferior than your max_length={max_length}.')

        if input_length < max_length:
            logger.warning(
                f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})')
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Translation pipeline: text2text generation with src/tgt language handling.

    Fixes (review): `check_inputs` and `preprocess` declared duplicate
    `lowercase_` parameters (a SyntaxError), and `_sanitize_parameters` lost
    its dict-key assignment targets; restored from the names the bodies
    already reference and the `super()` calls' contract. The class attribute
    is restored to `return_name`, which the parent's `postprocess` reads.
    """

    # Output keys become "translation_text" / "translation_token_ids".
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Warn (don't fail) when the input is close to the generation budget."""
        if input_length > 0.9 * max_length:
            logger.warning(
                f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
                "increasing your max_length manually, e.g. translator('...', max_length=400)")
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        # Tokenizers with translation support build the inputs themselves.
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fixed (review): module is `bs4`, not `bsa`

if __name__ == "__main__":
    # Download the og:image of a web page and save it with a timestamped name.
    # Fixes (review): the import module name was broken, and every assignment
    # target was collapsed to `lowerCamelCase__` while the f-strings below
    # still referenced `url`, `image_url`, `image_data` and `file_name` —
    # those names are restored.
    url = input('''Enter image url: ''').strip()
    print(F"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, '''html.parser''')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
    image_data = requests.get(image_url).content
    file_name = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, '''wb''') as fp:
        fp.write(image_data)
    print(F"Done. Image saved to disk as {file_name}.")
| 82 | 1 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCamelCase__ = logging.getLogger(__name__)
def lowerCAmelCase__ ( ):
    """Tokenize a text dump line by line and pickle the resulting id arrays.

    Fixes (review): every assignment target was collapsed to `_UpperCamelCase`
    while the loop body still referenced `rslt`, `iter`, `interval`, `start`
    and `end`, and `a__` was used inside this zero-argument function; names
    are restored from those references (`iter` is renamed `iter_` to avoid
    shadowing the builtin). `np.uintaa`/`np.intaa` are restored to
    `np.uint16`/`np.int32`, matching the `1 << 16` vocab-size check.
    """
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path" , type=str , default="data/dump.txt" , help="The path to the data." )
    parser.add_argument("--tokenizer_type" , type=str , default="bert" , choices=["bert", "roberta", "gpt2"] )
    parser.add_argument("--tokenizer_name" , type=str , default="bert-base-uncased" , help="The tokenizer to use." )
    parser.add_argument("--dump_file" , type=str , default="data/dump" , help="The dump file prefix." )
    args = parser.parse_args()

    logger.info(f'Loading Tokenizer ({args.tokenizer_name})' )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f'Loading text from {args.file_path}' )
    with open(args.file_path , "r" , encoding="utf8" ) as fp:
        data = fp.readlines()

    logger.info("Start encoding" )
    logger.info(f'{len(data )} examples to process.' )

    rslt = []
    iter_ = 0
    interval = 10_000
    start = time.time()
    for text in data:
        # Wrap every line with the model's BOS/SEP special tokens.
        text = f'{bos} {text.strip()} {sep}'
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )

        iter_ += 1
        if iter_ % interval == 0:
            end = time.time()
            logger.info(f'{iter_} examples processed. - {(end-start):.2f}s/{interval}expl' )
            start = time.time()
    logger.info("Finished binarization" )
    logger.info(f'{len(data )} examples processed.' )

    dp_file = f'{args.dump_file}.{args.tokenizer_name}.pickle'
    vocab_size = tokenizer.vocab_size
    # uint16 is enough when every token id fits in 16 bits.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(f'Dump to {dp_file}' )
    with open(dp_file , "wb" ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
    # Fix (review): the original called `main()`, which is never defined in
    # this module — the entry point above was renamed `lowerCAmelCase__` by
    # the obfuscation, so call it under that name.
    lowerCAmelCase__()
| 82 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( lowerCAmelCase ):
    """Configuration for DPR encoders/readers (BERT-style hyperparameters plus
    an optional projection dimension).

    Fixes (review): `__init__` declared all fifteen parameters as `lowercase_`
    (duplicate argument — a SyntaxError) and assigned them to a throwaway
    local; names and `self.` attribute targets are restored from the
    right-hand sides the body already referenced. The registration attribute
    is restored to `model_type`, the name the PretrainedConfig contract reads.
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # projection_dim == 0 means no projection layer on top of the encoder.
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 82 | 1 |
import gc
import threading
import time
import psutil
import torch
class _UpperCAmelCase :
    """Track peak RSS of the current process with a busy-polling background thread.

    Fixes (review): every `self.` attribute assignment was collapsed to a
    throwaway local (`_UpperCamelCase = ...`), so `start()`/`stop()` never
    actually controlled the monitor; attribute targets and method names are
    restored from the body's own references (`self.peak_monitoring`,
    `self.thread`, `self.cpu_memory_peak`) and the `cpu_peak_tracker.start()`
    / `.stop()` call sites below.
    """

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        """Busy-loop recording the max RSS until `peak_monitoring` is cleared."""
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        """Start the daemon monitor thread."""
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        """Stop the monitor and return the observed peak RSS in bytes."""
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
# Fix (review): the original referenced `PeakCPUMemory`, which is never defined
# in this file (the tracker class above was renamed `_UpperCAmelCase` by the
# obfuscation); also bind `cpu_peak_tracker`, the name the measure functions
# below actually use.
cpu_peak_tracker = lowerCamelCase__ = _UpperCAmelCase()
def lowerCAmelCase__ ( ):
    """Snapshot time, CPU RSS and per-GPU allocated memory, and start the CPU
    peak tracker.

    Fixes (review): the dict-entry assignment targets (`measures["cpu"]`,
    `measures[str(i)]`) were collapsed to a throwaway local, and `a__` was
    used inside this zero-argument function instead of the loop index `i`.

    :return: dict with keys "time", "cpu" and one str(i) entry per CUDA device.
    """
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = torch.cuda.memory_allocated(i )
    torch.cuda.reset_peak_memory_stats()

    return measures
def lowerCAmelCase__ ( start_measures ):
    """Compute deltas (time in seconds, memory in MiB) against a snapshot from
    the start-measure function above.

    Fixes (review): the parameter was named `a__` while the body referenced
    `start_measures`, and the dict-entry assignment targets were collapsed to
    a throwaway local; both restored.

    :param start_measures: dict produced by the start-measure function.
    :return: dict with "time", "cpu", "cpu-peak" and per-GPU str(i)/f"{i}-peak" deltas.
    """
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (convert bytes to MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = (torch.cuda.memory_allocated(i ) - start_measures[str(i )]) / 2**20
        measures[f'{i}-peak'] = (torch.cuda.max_memory_allocated(i ) - start_measures[str(i )]) / 2**20

    return measures
def lowerCAmelCase__ ( measures , description ):
    """Pretty-print a measurement dict from the end-measure function above.

    Fixes (review): the original declared both parameters as `a__` (duplicate
    argument — a SyntaxError); names restored from the f-strings the body
    already uses.

    :param measures: dict with "time", "cpu", "cpu-peak" and per-GPU entries.
    :param description: label printed before the measurements.
    """
    print(f'{description}:' )
    print(f'- Time: {measures["time"]:.2f}s' )
    for i in range(torch.cuda.device_count() ):
        print(f'- GPU {i} allocated: {measures[str(i )]:.2f}MiB' )
        peak = measures[f'{i}-peak']
        print(f'- GPU {i} peak: {peak:.2f}MiB' )
    print(f'- CPU RAM allocated: {measures["cpu"]:.2f}MiB' )
    print(f'- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB' )
| 82 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the Table Transformer model: the configuration is
# always importable, the modeling classes only when torch is available.
lowerCamelCase__ = {
    '''configuration_table_transformer''': [
        '''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TableTransformerConfig''',
        '''TableTransformerOnnxConfig''',
    ]
}

# Register the torch-backed modeling symbols only when torch is installed;
# a missing backend simply leaves them out of the import structure.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        '''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TableTransformerForObjectDetection''',
        '''TableTransformerModel''',
        '''TableTransformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    # ...while at runtime the module is replaced by a lazy proxy that imports
    # submodules on first attribute access.
    lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | 1 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class _UpperCAmelCase ( lowerCAmelCase ):
    """T5 model configuration (model type ``"t5"``).

    NOTE(review): machine-obfuscated. The three class attributes below are all
    bound to the same name ``__A`` (last assignment wins), and ``__init__``
    repeats the parameter name ``lowercase_`` — a SyntaxError as written.
    Judging by the defaults and the attribute-map, the original attributes were
    model_type / keys_to_ignore_at_inference / attribute_map — TODO confirm.
    """
    __A = '''t5'''
    __A = ['''past_key_values''']
    __A = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__( self : Any , lowercase_ : Union[str, Any]=32128 , lowercase_ : Optional[Any]=512 , lowercase_ : Optional[int]=64 , lowercase_ : Any=2048 , lowercase_ : List[Any]=6 , lowercase_ : str=None , lowercase_ : List[Any]=8 , lowercase_ : str=32 , lowercase_ : Dict=128 , lowercase_ : List[Any]=0.1 , lowercase_ : Optional[Any]=1e-6 , lowercase_ : str=1.0 , lowercase_ : Optional[int]="relu" , lowercase_ : Tuple=True , lowercase_ : str=True , lowercase_ : int=0 , lowercase_ : Optional[Any]=1 , **lowercase_ : Union[str, Any] , ) -> List[str]:
        """Build the config; the body reads the *original* parameter names
        (vocab_size, d_model, d_kv, d_ff, num_layers, ...), which the
        obfuscated signature no longer provides."""
        _UpperCamelCase = vocab_size
        _UpperCamelCase = d_model
        _UpperCamelCase = d_kv
        _UpperCamelCase = d_ff
        _UpperCamelCase = num_layers
        # decoder depth defaults to the encoder depth when not given
        _UpperCamelCase = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        _UpperCamelCase = num_heads
        _UpperCamelCase = relative_attention_num_buckets
        _UpperCamelCase = relative_attention_max_distance
        _UpperCamelCase = dropout_rate
        _UpperCamelCase = layer_norm_epsilon
        _UpperCamelCase = initializer_factor
        _UpperCamelCase = feed_forward_proj
        _UpperCamelCase = use_cache
        # feed_forward_proj is either "<act>" or "gated-<act>"
        _UpperCamelCase = self.feed_forward_proj.split("-")
        _UpperCamelCase = act_info[-1]
        _UpperCamelCase = act_info[0] == "gated"
        if len(lowercase_) > 1 and act_info[0] != "gated" or len(lowercase_) > 2:
            raise ValueError(
                f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            _UpperCamelCase = "gelu_new"
        super().__init__(
            pad_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ , )
class _UpperCAmelCase ( lowerCAmelCase ):
    """ONNX-export configuration for T5 (seq2seq with optional past key/values).

    NOTE(review): obfuscated — both methods share the name ``__UpperCAmelCase``
    (the second shadows the first in the class dict), and the first method
    builds its mapping into ``_UpperCamelCase`` yet returns ``common_inputs``,
    which is never assigned. Presumably these were the ``inputs`` property and
    the default ONNX opset — TODO confirm against the original source.
    """
    @property
    def __UpperCAmelCase ( self : int) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axes mapping for encoder/decoder inputs; with use_past the
        decoder sequence axis covers past + current tokens."""
        _UpperCamelCase = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            _UpperCamelCase = "past_encoder_sequence + sequence"
            _UpperCamelCase = {0: "batch"}
            _UpperCamelCase = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            _UpperCamelCase = {0: "batch", 1: "decoder_sequence"}
            _UpperCamelCase = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            # register the past_key_values inputs with their dynamic axes
            self.fill_with_past_key_values_(lowercase_ , direction="inputs")
        return common_inputs
    @property
    def __UpperCAmelCase ( self : Union[str, Any]) -> int:
        """Minimum ONNX opset version required for export."""
        return 13
| 82 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCamelCase__ = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None):
    """Convert a TensorFlow XLNet checkpoint into a PyTorch model directory.

    Fixes obfuscation damage: the original def repeated the parameter name
    ``a__`` four times (a SyntaxError) and was named ``lowerCAmelCase__`` even
    though the ``__main__`` block below calls
    ``convert_xlnet_checkpoint_to_pytorch``.

    Args:
        tf_checkpoint_path: path to the TF checkpoint to load weights from.
        bert_config_file: JSON config describing the XLNet architecture.
        pytorch_dump_folder_path: output folder for weights + config.
        finetuning_task: optional GLUE task name or a squad task; selects the
            model head class.

    NOTE(review): ``GLUE_TASKS_NUM_LABELS`` is referenced as in the original
    body, but this file binds that dict to ``lowerCamelCase__`` — TODO restore
    its name at module level as well.
    """
    config = XLNetConfig.from_json_file(bert_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}')
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        # no task head: plain language-modeling head
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'Save configuration file to {os.path.abspath(pytorch_config_dump_path)}')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI entry point: parse checkpoint/config/output paths and run conversion.
    # NOTE(review): obfuscation artifact — the parser is bound to
    # `lowerCamelCase__` but used below as `parser`, and `parse_args()` is
    # likewise bound to `lowerCamelCase__` but read as `args`; TODO restore the
    # original local names.
    lowerCamelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--xlnet_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained XLNet model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
    )
    parser.add_argument(
        '''--finetuning_task''',
        default=None,
        type=str,
        help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
    )
    lowerCamelCase__ = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 82 | 1 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
lowerCamelCase__ = parse(importlib.metadata.version('''torch'''))
def compare_versions(library_or_version, operation, requirement_version):
    """Compare an installed library's version (or a given Version) to a requirement.

    Fixes obfuscation damage: the def repeated parameter name ``a__`` three
    times (a SyntaxError) and the wrapper below calls ``compare_versions``,
    which did not exist under the obfuscated name.

    Args:
        library_or_version: a library name (its installed version is looked up
            via ``importlib.metadata``) or an already-parsed version object.
        operation: comparison key into ``STR_OPERATION_TO_FUNC``
            (e.g. ``">="``).
        requirement_version: version string to compare against.

    Returns:
        bool result of ``operation(library_or_version, requirement_version)``.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}')
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        # a name was given: resolve the installed distribution's version
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def lowerCAmelCase__(operation, version) -> bool:
    """Compare the installed torch version against ``version``.

    The module-level ``lowerCamelCase__`` (parsed torch version, see the
    ``parse(importlib.metadata.version('torch'))`` assignment above) is the
    left-hand side of the comparison; the original three-args-from-two-params
    call ``compare_versions(a__, a__, a__)`` could never have run.
    """
    return compare_versions(lowerCamelCase__, operation, version)
| 82 | import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """Minimal LightningModule container used only to load a QA checkpoint.

    Fixes obfuscation damage: the conversion function below constructs
    ``LightningModel(...)`` and reads ``.model`` / ``.qa_outputs``, but this
    class was renamed ``_UpperCAmelCase`` and its attribute assignments were
    all collapsed into ``_UpperCamelCase``, so nothing was ever stored.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        # start/end logits for extractive question answering
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        # implemented only to satisfy the LightningModule interface;
        # never called during checkpoint conversion
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    """Convert a PyTorch-Lightning Longformer QA checkpoint to a HF model dir.

    Fixes obfuscation damage: the def repeated ``a__`` three times (a
    SyntaxError) and was named ``lowerCAmelCase__`` although the ``__main__``
    block calls ``convert_longformer_qa_checkpoint_to_pytorch``. The argument
    roles are recovered from the body: the first is the base model id, the
    second the Lightning ``.ckpt`` path, the third the output folder.
    """
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    # Lightning checkpoints store the weights under "state_dict"
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}')
if __name__ == "__main__":
    # CLI entry point for the Longformer QA checkpoint conversion.
    # NOTE(review): obfuscation artifact — the parser/args are bound to
    # `lowerCamelCase__` but read back as `parser`/`args`; TODO restore names.
    lowerCamelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--longformer_model''',
        default=None,
        type=str,
        required=True,
        help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
    )
    parser.add_argument(
        '''--longformer_question_answering_ckpt_path''',
        default=None,
        type=str,
        required=True,
        help='''Path the official PyTorch Lightning Checkpoint.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    lowerCamelCase__ = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 82 | 1 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : str , *lowercase_ : List[str] , **lowercase_ : Union[str, Any]) -> None:
"""simple docstring"""
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_)
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase ):
    """Deprecated alias for the LayoutLMv2 image processor.

    Fixes obfuscation damage: ``def __init__(self, *lowercase_, **lowercase_)``
    reused one parameter name for both the var-positional and var-keyword slots
    (a SyntaxError), and the garbled ``lowercase_`` was being passed to
    ``warnings.warn`` as the warning category. FutureWarning is the standard
    category for this deprecation-shim pattern.
    """
    def __init__(self, *args, **kwargs) -> None:
        # warn once per call site, then behave exactly like the new class
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead." , FutureWarning , )
        super().__init__(*args, **kwargs)
| 82 | 1 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
lowerCamelCase__ = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def get_diffusers_versions():
    """Return all diffusers releases published on PyPI, semantically sorted.

    Fixes obfuscation damage: the zero-argument function referenced the
    undefined name ``a__`` as both the URL to fetch and the iterable to sort,
    and the later code in this file calls it as ``get_diffusers_versions()``.
    """
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    # sort semantically ("0.10" after "0.9"), not lexicographically
    return sorted(releases, key=lambda ver: version.Version(ver))
def init_hf_modules():
    """Create the dynamic-modules cache package and put it on ``sys.path``.

    Fixes obfuscation damage: this zero-argument function referenced the
    undefined name ``a__`` everywhere ``HF_MODULES_CACHE`` was intended, and
    ``create_dynamic_module`` below calls it as ``init_hf_modules()``.
    Idempotent: returns immediately once the cache dir is on ``sys.path``.
    """
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    # an __init__.py makes the cache directory importable as a package
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    """Create package folder ``name`` under the dynamic-modules cache.

    Fixes obfuscation damage: the body read the free name ``name`` while the
    parameter was ``a__``, the computed path was discarded into
    ``_UpperCamelCase``, and ``os.makedirs`` received the path as its
    ``exist_ok`` flag. Call sites in this file use ``create_dynamic_module``.
    """
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """Return the module names relatively imported by ``module_file``.

    Fixes obfuscation damage: the file *content* was read into
    ``_UpperCamelCase`` and then the regexes were run against the filename
    (``a__``) instead of the content; the two result lists were never merged.
    Callers in this file use the name ``get_relative_imports``. Regexes are
    raw strings to avoid invalid escape-sequence warnings.

    Args:
        module_file: path of a Python source file.

    Returns:
        de-duplicated list of names X from ``import .X`` / ``from .X import``.
    """
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def lowerCAmelCase__(module_file) -> str:
    """Recursively collect every file reachable via relative imports.

    Fixes obfuscation damage: all loop-state names (``no_change``,
    ``files_to_check``, ``all_relative_imports``, ``new_imports``,
    ``new_import_files``, ``module_path``) had been collapsed into
    ``_UpperCamelCase`` even though the loop conditions read them back.

    Args:
        module_file: path of the root Python source file.

    Returns:
        list of ``.py`` paths (relative to ``module_file``'s directory)
        transitively referenced through relative imports.
    """
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        # only keep imports we have not already resolved
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f'{f}.py' for f in new_import_files]
        # fixed point reached when a pass discovers nothing new
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def check_imports(filename):
    """Verify that every top-level package imported by ``filename`` is installed.

    Fixes obfuscation damage: the regexes were run against the *filename*
    instead of the file content, and ``imports`` / ``missing_packages`` were
    never actually assigned. ``get_cached_module_file`` calls this as
    ``check_imports``.

    Raises:
        ImportError: listing every missing package, if any.

    Returns:
        the file's relative imports (see ``get_relative_imports``).
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f'{", ".join(missing_packages)}. Run `pip install {" ".join(missing_packages)}`')
    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import ``module_path`` and return the class named ``class_name`` from it.

    Fixes obfuscation damage: the def repeated ``a__`` twice (a SyntaxError)
    and discarded the converted module path. ``get_class_from_dynamic_module``
    below calls this as ``get_class_in_module``.

    Args:
        class_name: name of the class to fetch, or None to auto-discover the
            single pipeline class via ``find_pipeline_class``.
        module_path: filesystem-style module path; separators become dots.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """Return the unique user-defined DiffusionPipeline subclass in a module.

    Fixes obfuscation damage: ``cls_members`` / ``pipeline_class`` were
    collapsed into ``_UpperCamelCase`` although the loop reads them back, and
    ``issubclass`` was called with garbled arguments. ``get_class_in_module``
    calls this as ``find_pipeline_class``.

    Raises:
        ValueError: if more than one candidate subclass is found.

    Returns:
        the matching class, or None when the module defines none.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        # skip the base class itself and anything re-exported from diffusers
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
                    f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
                    f' {loaded_module}.')
            pipeline_class = cls
    return pipeline_class
def lowerCAmelCase__ ( a__ , a__ , a__ = None , a__ = False , a__ = False , a__ = None , a__ = None , a__ = None , a__ = False , ) ->Tuple:
    """Resolve a (possibly remote) module file and cache it locally.

    Handles three sources visible below: an existing local file, a community
    pipeline fetched from GitHub by diffusers release tag, and a Hub repo via
    ``hf_hub_download``; the file plus its relative imports are then copied
    into the dynamic-modules cache.

    NOTE(review): machine-obfuscated — the signature repeats ``a__`` nine
    times (a SyntaxError) while the body reads the original parameter names
    (``pretrained_model_name_or_path``, ``module_file``, ``revision``,
    ``use_auth_token``, ...), and every local (``module_file_or_url``,
    ``submodule``, ``available_versions``, ``latest_version``,
    ``modules_needed``, ``full_submodule``, ``submodule_path``, ``token``,
    ``commit_hash``) was collapsed into ``_UpperCamelCase``. Kept byte-for-byte
    pending reconstruction from the original source.
    """
    _UpperCamelCase = str(a__ )
    _UpperCamelCase = os.path.join(a__ , a__ )
    if os.path.isfile(a__ ):
        # source 1: plain local file
        _UpperCamelCase = module_file_or_url
        _UpperCamelCase = "local"
    elif pretrained_model_name_or_path.count("/" ) == 0:
        # source 2: community pipeline on GitHub, pinned to a diffusers release
        _UpperCamelCase = get_diffusers_versions()
        # cut ".dev0"
        _UpperCamelCase = "v" + ".".join(__version__.split("." )[:3] )
        # retrieve github version that matches
        if revision is None:
            _UpperCamelCase = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f'Defaulting to latest_version: {revision}.' )
        elif revision in available_versions:
            _UpperCamelCase = f'v{revision}'
        elif revision == "main":
            _UpperCamelCase = revision
        else:
            raise ValueError(
                f'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
                f' {", ".join(available_versions + ["main"] )}.' )
        # community pipeline on GitHub
        _UpperCamelCase = COMMUNITY_PIPELINES_URL.format(revision=a__ , pipeline=a__ )
        try:
            _UpperCamelCase = cached_download(
                a__ , cache_dir=a__ , force_download=a__ , proxies=a__ , resume_download=a__ , local_files_only=a__ , use_auth_token=a__ , )
            _UpperCamelCase = "git"
            _UpperCamelCase = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
            raise
    else:
        # source 3: a repo on the Hugging Face Hub
        try:
            # Load from URL or cache if already cached
            _UpperCamelCase = hf_hub_download(
                a__ , a__ , cache_dir=a__ , force_download=a__ , proxies=a__ , resume_download=a__ , local_files_only=a__ , use_auth_token=a__ , )
            _UpperCamelCase = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) )
        except EnvironmentError:
            logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
            raise
    # Check we have all the requirements in our environment
    _UpperCamelCase = check_imports(a__ )
    # Now we move the module inside our cached dynamic modules.
    _UpperCamelCase = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(a__ )
    _UpperCamelCase = Path(a__ ) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(a__ , submodule_path / module_file )
        for module_needed in modules_needed:
            _UpperCamelCase = f'{module_needed}.py'
            shutil.copy(os.path.join(a__ , a__ ) , submodule_path / module_needed )
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(a__ , a__ ):
            _UpperCamelCase = use_auth_token
        elif use_auth_token is True:
            _UpperCamelCase = HfFolder.get_token()
        else:
            _UpperCamelCase = None
        _UpperCamelCase = model_info(a__ , revision=a__ , token=a__ ).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        _UpperCamelCase = submodule_path / commit_hash
        _UpperCamelCase = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(a__ )
        if not (submodule_path / module_file).exists():
            shutil.copy(a__ , submodule_path / module_file )
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    a__ , f'{module_needed}.py' , cache_dir=a__ , force_download=a__ , resume_download=a__ , proxies=a__ , use_auth_token=a__ , revision=a__ , local_files_only=a__ , )
    return os.path.join(a__ , a__ )
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Download/cache a dynamic module file and return a class defined in it.

    Fixes obfuscation damage: the def repeated ``a__`` for every parameter
    (a SyntaxError). Parameter names/order are reconstructed from the default
    pattern (None, None, False, False, None, None, None, False) and from the
    keyword arguments the body forwards to ``get_cached_module_file``.

    Returns:
        the class ``class_name`` from the cached module (or the auto-discovered
        pipeline class when ``class_name`` is None — see ``get_class_in_module``).
    """
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    # strip the ".py" so the cached path can be imported as a module
    return get_class_in_module(class_name, final_module.replace(".py", ""))
| 82 | import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    """Output format selector for the text2text pipeline.

    Fixes obfuscation damage: both members were named ``__A`` (the second
    silently overwrote the first) and the class was renamed although the
    pipeline code below references ``ReturnType.TENSORS`` / ``ReturnType.TEXT``.
    """

    TENSORS = 0  # return raw generated token ids
    TEXT = 1  # return decoded text
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Text2text generation pipeline (seq2seq models, e.g. T5/BART).

    NOTE(review): machine-obfuscated. Many defs repeat the parameter name
    ``lowercase_`` (a SyntaxError as written), method names are collapsed to
    ``__UpperCAmelCase`` (later defs shadow earlier ones in the class dict),
    and several locals the code reads back (``inputs``, ``result``,
    ``generate_kwargs``, ``in_b``/``out_b``, ``records``) were overwritten
    with ``_UpperCamelCase``. Code kept byte-for-byte pending reconstruction.
    """
    __A = '''generated'''
    def __init__( self : Any , *lowercase_ : Dict , **lowercase_ : Tuple) -> List[Any]:
        """Initialise and restrict to seq2seq (encoder-decoder) model classes."""
        super().__init__(*lowercase_ , **lowercase_)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=None , lowercase_ : Any=None , lowercase_ : Union[str, Any]=None , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
        """Split user kwargs into (preprocess, forward, postprocess) params.

        NOTE(review): presumably ``_sanitize_parameters`` with
        (return_tensors, return_type, clean_up_tokenization_spaces,
        truncation, stop_sequence) — TODO confirm against the original.
        """
        _UpperCamelCase = {}
        if truncation is not None:
            _UpperCamelCase = truncation
        _UpperCamelCase = generate_kwargs
        _UpperCamelCase = {}
        if return_tensors is not None and return_type is None:
            _UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            _UpperCamelCase = return_type
        if clean_up_tokenization_spaces is not None:
            _UpperCamelCase = clean_up_tokenization_spaces
        if stop_sequence is not None:
            # only single-token stop sequences are supported by generate()
            _UpperCamelCase = self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
            if len(lowercase_) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            _UpperCamelCase = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def __UpperCAmelCase ( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> Any:
        """Input-length check hook; base class accepts everything."""
        return True
    def __UpperCAmelCase ( self : Dict , *lowercase_ : List[str] , lowercase_ : List[Any]) -> Tuple:
        """Prefix the inputs with the model's task prefix and tokenize them."""
        _UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0] , lowercase_):
            # batch input: padding requires a pad token
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            _UpperCamelCase = ([prefix + arg for arg in args[0]],)
            _UpperCamelCase = True
        elif isinstance(args[0] , lowercase_):
            _UpperCamelCase = (prefix + args[0],)
            _UpperCamelCase = False
        else:
            raise ValueError(
                f' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`')
        _UpperCamelCase = self.tokenizer(*lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__( self : List[Any] , *lowercase_ : Any , **lowercase_ : int) -> Dict:
        """Run the pipeline; unwrap single-result lists for list inputs."""
        _UpperCamelCase = super().__call__(*lowercase_ , **lowercase_)
        if (
            isinstance(args[0] , lowercase_)
            and all(isinstance(lowercase_ , lowercase_) for el in args[0])
            and all(len(lowercase_) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str=TruncationStrategy.DO_NOT_TRUNCATE , **lowercase_ : Dict) -> Optional[int]:
        """Preprocess: tokenize with the requested truncation strategy."""
        _UpperCamelCase = self._parse_and_tokenize(lowercase_ , truncation=lowercase_ , **lowercase_)
        return inputs
    def __UpperCAmelCase ( self : str , lowercase_ : str , **lowercase_ : str) -> str:
        """Forward: validate lengths, run generate(), reshape to (batch, num_return, seq)."""
        if self.framework == "pt":
            _UpperCamelCase , _UpperCamelCase = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            _UpperCamelCase , _UpperCamelCase = tf.shape(model_inputs["input_ids"]).numpy()
        _UpperCamelCase = generate_kwargs.get("min_length" , self.model.config.min_length)
        _UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
        self.check_inputs(lowercase_ , generate_kwargs["min_length"] , generate_kwargs["max_length"])
        _UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
        _UpperCamelCase = output_ids.shape[0]
        if self.framework == "pt":
            _UpperCamelCase = output_ids.reshape(lowercase_ , out_b // in_b , *output_ids.shape[1:])
        elif self.framework == "tf":
            _UpperCamelCase = tf.reshape(lowercase_ , (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : int=ReturnType.TEXT , lowercase_ : int=False) -> Tuple:
        """Postprocess: decode token ids to text or pass through raw tensors."""
        _UpperCamelCase = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                _UpperCamelCase = {f'{self.return_name}_token_ids': output_ids}
            elif return_type == ReturnType.TEXT:
                _UpperCamelCase = {
                    f'{self.return_name}_text': self.tokenizer.decode(
                        lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
                }
            records.append(lowercase_)
        return records
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Summarization pipeline (text2text subclass, return name ``summary``).

    NOTE(review): obfuscated — ``__call__`` repeats the parameter name
    ``lowercase_`` for *args and **kwargs (a SyntaxError as written), and the
    length-check method reads ``max_length``/``min_length``/``input_length``
    that its garbled signature no longer provides.
    """
    __A = '''summary'''
    def __call__( self : Optional[Any] , *lowercase_ : int , **lowercase_ : Dict) -> Optional[int]:
        """Delegate to the text2text pipeline call."""
        return super().__call__(*lowercase_ , **lowercase_)
    def __UpperCAmelCase ( self : List[str] , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> bool:
        """Warn on suspicious length configs (min>max, input shorter than max)."""
        if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be inferior than your max_length={max_length}.')
        if input_length < max_length:
            logger.warning(
                f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})')
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Translation pipeline (text2text subclass, return name ``translation``).

    NOTE(review): obfuscated — several defs repeat ``lowercase_`` (SyntaxError
    as written) and ``_sanitize_parameters`` reads back names
    (``preprocess_params``, ``task``, ``items``) that were overwritten with
    ``_UpperCamelCase``. Code kept byte-for-byte.
    """
    __A = '''translation'''
    def __UpperCAmelCase ( self : Dict , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> int:
        """Warn when the input is close to max_length (likely truncated output)."""
        if input_length > 0.9 * max_length:
            logger.warning(
                f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
                "increasing your max_length manually, e.g. translator('...', max_length=400)")
        return True
    def __UpperCAmelCase ( self : Tuple , *lowercase_ : Any , lowercase_ : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , lowercase_ : Any=None , lowercase_ : Optional[Any]=None) -> List[str]:
        """Tokenize, preferring the tokenizer's translation-specific builder when present."""
        if getattr(self.tokenizer , "_build_translation_inputs" , lowercase_):
            return self.tokenizer._build_translation_inputs(
                *lowercase_ , return_tensors=self.framework , truncation=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_)
        else:
            return super()._parse_and_tokenize(*lowercase_ , truncation=lowercase_)
    def __UpperCAmelCase ( self : List[str] , lowercase_ : Dict=None , lowercase_ : str=None , **lowercase_ : List[Any]) -> List[Any]:
        """Extract src/tgt languages from explicit args or a task name like translation_XX_to_YY."""
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = super()._sanitize_parameters(**lowercase_)
        if src_lang is not None:
            _UpperCamelCase = src_lang
        if tgt_lang is not None:
            _UpperCamelCase = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            _UpperCamelCase = kwargs.get("task" , self.task)
            _UpperCamelCase = task.split("_")
            if task and len(lowercase_) == 4:
                # translation, XX, to YY
                _UpperCamelCase = items[1]
                _UpperCamelCase = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__( self : List[str] , *lowercase_ : List[str] , **lowercase_ : str) -> Union[str, Any]:
        """Delegate to the text2text pipeline call."""
        return super().__call__(*lowercase_ , **lowercase_)
| 82 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the LUKE model: submodules are imported on first
# attribute access via `_LazyModule`; torch-only modeling symbols are
# registered only when torch is installed.
#
# Fixes obfuscation damage: `_import_structure` (passed to `_LazyModule` on
# the last line) was never defined — the dict was bound to `lowerCamelCase__`
# and then clobbered by the modeling-name list, so importing this module
# raised NameError.
_import_structure = {
    '''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
    '''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: leave the modeling symbols out of the lazy map
    pass
else:
    _import_structure['''modeling_luke'''] = [
        '''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''LukeForEntityClassification''',
        '''LukeForEntityPairClassification''',
        '''LukeForEntitySpanClassification''',
        '''LukeForMultipleChoice''',
        '''LukeForQuestionAnswering''',
        '''LukeForSequenceClassification''',
        '''LukeForTokenClassification''',
        '''LukeForMaskedLM''',
        '''LukeModel''',
        '''LukePreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Static type-checkers see the real imports.
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )
else:
    import sys

    lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''spiece.model'''}
lowerCamelCase__ = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase__ = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
lowerCamelCase__ = '''▁'''
class _UpperCAmelCase ( lowerCAmelCase ):
    '''SentencePiece-based T5 tokenizer (slow implementation).

    Creates ``<extra_id_*>`` sentinel tokens, loads a SentencePiece model from
    a vocab file, and maps sentinel ids to the top of the vocabulary.
    NOTE(review): identifiers appear machine-mangled (e.g. every class
    attribute below rebinds ``__A``); behaviour mirrors ``T5Tokenizer``.
    '''
    # NOTE(review): these four assignments all rebind ``__A`` — upstream they
    # are vocab_files_names, pretrained_vocab_files_map,
    # max_model_input_sizes and model_input_names respectively.
    __A = VOCAB_FILES_NAMES
    __A = PRETRAINED_VOCAB_FILES_MAP
    __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __A = ['''input_ids''', '''attention_mask''']
    def __init__( self : Tuple , lowercase_ : int , lowercase_ : str="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Dict="<pad>" , lowercase_ : Tuple=100 , lowercase_ : str=None , lowercase_ : Optional[Dict[str, Any]] = None , lowercase_ : str=True , **lowercase_ : Optional[Any] , ) -> None:
        """Initialise the tokenizer and load the SentencePiece model.

        Builds ``<extra_id_i>`` sentinel tokens when none are supplied, and
        validates user-supplied ``additional_special_tokens`` against
        ``extra_ids``.
        """
        if extra_ids > 0 and additional_special_tokens is None:
            _UpperCamelCase = [f'<extra_id_{i}>' for i in range(lowercase_)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            _UpperCamelCase = len(set(filter(lambda lowercase_: bool("extra_id" in str(lowercase_)) , lowercase_)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")
        if legacy:
            # Legacy mode keeps the historical handling of text that follows
            # special tokens; warn users once about the behavioural difference.
            logger.warning_once(
                f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
        _UpperCamelCase = legacy
        _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase_ , **lowercase_ , )
        _UpperCamelCase = vocab_file
        _UpperCamelCase = extra_ids
        _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(lowercase_)
    @staticmethod
    def __UpperCAmelCase ( lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : str) -> Any:
        """Resolve the effective ``model_max_length`` for a checkpoint.

        Warns when the legacy per-checkpoint default silently overrides the
        user-requested maximum length (kept for backwards compatibility).
        """
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            _UpperCamelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f' {pretrained_model_name_or_path} automatically truncating your input to'
                    f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
                    f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value." , lowercase_ , )
        return max_model_length
    @property
    def __UpperCAmelCase ( self : Dict) -> Optional[int]:
        """Vocabulary size: SentencePiece pieces plus the extra sentinel ids."""
        return self.sp_model.get_piece_size() + self._extra_ids
    def __UpperCAmelCase ( self : Dict) -> Optional[int]:
        """Return the full token -> id mapping, including added tokens."""
        _UpperCamelCase = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __UpperCAmelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False) -> List[int]:
        """Return a mask with 1 at special-token positions (the eos appended
        after each sequence) and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(lowercase_)) + [1]
        return ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1]
    def __UpperCAmelCase ( self : str) -> Dict:
        """Return the ``<extra_id_*>`` sentinel tokens found among the
        additional special tokens."""
        return list(
            set(filter(lambda lowercase_: bool(re.search(R"<extra_id_\d+>" , lowercase_)) is not None , self.additional_special_tokens)))
    def __UpperCAmelCase ( self : List[Any]) -> Dict:
        """Return the ids of the sentinel tokens."""
        return [self._convert_token_to_id(lowercase_) for token in self.get_sentinel_tokens()]
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[int]) -> List[int]:
        """Append the eos id unless the sequence already ends with it (in
        which case a warning is emitted and the ids are returned unchanged)."""
        if len(lowercase_) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def __UpperCAmelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """T5 does not use token type ids; return a zero list sized to the
        full sequence(s) including the appended eos token(s)."""
        _UpperCamelCase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a + eos) * [0]
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Build model inputs: ``X </s>`` for a single sequence or
        ``A </s> B </s>`` for a pair."""
        _UpperCamelCase = self._add_eos_if_not_present(lowercase_)
        if token_ids_a is None:
            return token_ids_a
        else:
            _UpperCamelCase = self._add_eos_if_not_present(lowercase_)
            return token_ids_a + token_ids_a
    def __getstate__( self : Tuple) -> Any:
        """Drop the unpicklable SentencePiece processor from pickled state."""
        _UpperCamelCase = self.__dict__.copy()
        _UpperCamelCase = None
        return state
    def __setstate__( self : Optional[Any] , lowercase_ : Any) -> Optional[int]:
        """Restore state and reload the SentencePiece model from disk."""
        _UpperCamelCase = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs"):
            _UpperCamelCase = {}
        _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def __UpperCAmelCase ( self : int , lowercase_ : "TextInput" , **lowercase_ : Optional[int]) -> List[str]:
        """In non-legacy mode, prefix the text with SPIECE_UNDERLINE so that
        leading-space handling matches the fast tokenizer."""
        if not self.legacy:
            _UpperCamelCase = SPIECE_UNDERLINE + text.replace(lowercase_ , " ")
            return super().tokenize(lowercase_ , **lowercase_)
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : int , **lowercase_ : Optional[int]) -> List[str]:
        """Encode text with SentencePiece, stripping the artificial leading
        underline again in non-legacy mode."""
        if not self.legacy:
            _UpperCamelCase = text.startswith(lowercase_)
            if is_first:
                _UpperCamelCase = text[1:]
        _UpperCamelCase = self.sp_model.encode(lowercase_ , out_type=lowercase_)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(lowercase_):
            _UpperCamelCase = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
        """Map sentinel tokens to the top of the vocabulary; everything else
        goes through the SentencePiece piece -> id table."""
        if token.startswith("<extra_id_"):
            _UpperCamelCase = re.match(R"<extra_id_(\d+)>" , lowercase_)
            _UpperCamelCase = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(lowercase_)
    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> int:
        """Inverse of the token -> id mapping, including sentinel ids above
        the SentencePiece range."""
        if index < self.sp_model.get_piece_size():
            _UpperCamelCase = self.sp_model.IdToPiece(lowercase_)
        else:
            _UpperCamelCase = f'<extra_id_{self.vocab_size - 1 - index}>'
        return token
    def __UpperCAmelCase ( self : Dict , lowercase_ : Optional[int]) -> Optional[Any]:
        """Convert a token sequence back to a string.

        Special tokens are emitted verbatim (with a separating space); runs
        of ordinary tokens between them are decoded by SentencePiece.
        """
        _UpperCamelCase = []
        _UpperCamelCase = ""
        _UpperCamelCase = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(lowercase_) + token
                _UpperCamelCase = True
                _UpperCamelCase = []
            else:
                current_sub_tokens.append(lowercase_)
                _UpperCamelCase = False
        out_string += self.sp_model.decode(lowercase_)
        return out_string.strip()
    def __UpperCAmelCase ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
        """Save the SentencePiece model into ``save_directory``.

        Copies the original model file when available, otherwise serializes
        the in-memory model. Returns a 1-tuple with the written path.
        """
        if not os.path.isdir(lowercase_):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        _UpperCamelCase = os.path.join(
            lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , lowercase_)
        elif not os.path.isfile(self.vocab_file):
            with open(lowercase_ , "wb") as fi:
                _UpperCamelCase = self.sp_model.serialized_model_proto()
                fi.write(lowercase_)
        return (out_vocab_file,)
| 82 | 1 |
from __future__ import annotations
def lowerCAmelCase__ ( ciphertext , cipher_alphabet = None , frequencies_dict = None , case_sensitive = False , ) -> tuple[int, float, str]:
    """Break a Caesar cipher with a chi-squared goodness-of-fit test.

    Tries every shift over the alphabet, scores each candidate decryption
    against English letter frequencies, and returns the best candidate.

    BUG FIX: the original signature declared every parameter as ``a__``
    (a SyntaxError), and the sorting-key helper referenced an undefined
    name ``key`` (a NameError when the minimum was computed).

    Args:
        ciphertext: the encrypted message.
        cipher_alphabet: alphabet to shift over (default: lowercase a-z).
        frequencies_dict: letter -> relative-frequency map (default: English).
        case_sensitive: keep the original casing while decrypting.

    Returns:
        (most_likely_shift, its_chi_squared_value, decoded_message)
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97 , 123 )]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic for every candidate shift
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
        # decrypt the message with the current shift
        for letter in ciphertext:
            try:
                # Shift the letter back; characters outside the alphabet
                # raise ValueError and are passed through unchanged.
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Score the candidate decryption against the expected frequencies
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Expected occurrences given the reference frequency
                    expected = frequencies[letter] * occurrences
                    chi_squared_statistic += ((occurrences - expected) ** 2) / expected
            elif letter.lower() in frequencies:
                occurrences = decrypted_with_shift.count(letter)
                expected = frequencies[letter] * occurrences
                chi_squared_statistic += ((occurrences - expected) ** 2) / expected
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # The most likely shift minimises the chi-squared statistic
    def chi_squared_statistic_values_sorting_key(shift: int) -> tuple[float, str]:
        return chi_squared_statistic_values[shift]

    most_likely_cipher = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
| 82 | from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase__ ( a__ ) ->tuple:
    '''Build a ``(getitem, key)`` operation tuple for the hash-map test driver.

    BUG FIX: the original body returned the undefined name ``k`` instead of
    its own parameter (NameError). The return annotation was also wrong
    (``str`` for a tuple).
    '''
    return getitem, a__
def lowerCAmelCase__ ( k , v ) ->tuple:
    '''Build a ``(setitem, key, value)`` operation tuple for the test driver.

    BUG FIX: the original declared two parameters both named ``a__``, which
    is a SyntaxError; they are renamed to ``k`` and ``v``.
    '''
    return setitem, k, v
def lowerCAmelCase__ ( a__ ) ->tuple:
    '''Build a ``(delitem, key)`` operation tuple for the test driver.

    BUG FIX: the original body returned the undefined name ``k`` instead of
    its own parameter (NameError); the return annotation was also wrong.
    '''
    return delitem, a__
def lowerCAmelCase__ ( obj , fun , *args ) ->tuple:
    '''Apply ``fun(obj, *args)`` and return ``(result, None)`` on success or
    ``(None, exception)`` on failure, so dict and HashMap behaviour can be
    compared including the exceptions they raise.

    BUG FIX: the original declared all three parameters as ``a__`` — a
    SyntaxError — and therefore could not distinguish target, callable and
    arguments.
    '''
    try:
        return fun(obj , *args), None
    except Exception as e:
        # Intentionally broad: the caller compares exception behaviour.
        return None, e
# Operation scripts replayed against both dict and HashMap by the test below.
# NOTE(review): names appear machine-mangled — every constant rebinds
# ``lowerCamelCase__`` and the helpers are referenced as ``_set``/``_del``/
# ``_get``, which do not match the mangled definitions above; verify against
# the upstream test module.
lowerCamelCase__ = (
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
)
# Overwriting the same key twice
lowerCamelCase__ = [
    _set('''key_a''', '''val_a'''),
    _set('''key_a''', '''val_b'''),
]
# Interleaved inserts and deletes
lowerCamelCase__ = [
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
    _del('''key_a'''),
    _del('''key_b'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
]
# Accessing/deleting keys that are absent (should raise KeyError in both)
lowerCamelCase__ = [
    _get('''key_a'''),
    _del('''key_a'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
    _del('''key_a'''),
    _get('''key_a'''),
]
# Enough inserts to force the map to grow
lowerCamelCase__ = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
# Grow then shrink, then insert again
lowerCamelCase__ = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
    "operations" , (
        pytest.param(_add_items , id="add items" ),
        pytest.param(_overwrite_items , id="overwrite items" ),
        pytest.param(_delete_items , id="delete items" ),
        pytest.param(_access_absent_items , id="access absent items" ),
        pytest.param(_add_with_resize_up , id="add with resize up" ),
        pytest.param(_add_with_resize_down , id="add with resize down" ),
    ) , )
def lowerCAmelCase__ ( a__ ) ->Dict:
    '''Replay an operation script against HashMap and a plain dict and assert
    both behave identically (results, repr, keys, length, items).

    NOTE(review): variable names are machine-mangled — the two
    ``_run_operation`` calls should target the HashMap and the dict
    respectively, and the results should be bound to distinct names;
    as written both lines rebind the same pair. Verify against upstream.
    '''
    _UpperCamelCase = HashMap(initial_block_size=4 )
    _UpperCamelCase = {}
    for _, (fun, *args) in enumerate(a__ ):
        _UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
        _UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
        assert my_res == py_res
        assert str(a__ ) == str(a__ )
        assert set(a__ ) == set(a__ )
        assert len(a__ ) == len(a__ )
        assert set(my.items() ) == set(py.items() )
def lowerCAmelCase__ ( ) ->None:
    '''Verify HashMap exposes no public names beyond those of built-in dict.

    BUG FIX: the original helper took a parameter ``a__`` but its body read
    the undefined name ``name``, and the comprehensions called
    ``is_public(a__)`` with ``a__`` undefined — both NameErrors at runtime.
    '''
    def is_public(name: str) -> bool:
        # Underscore-prefixed attributes are considered private.
        return not name.startswith("_" )

    _UpperCamelCase = {name for name in dir({} ) if is_public(name )}
    _UpperCamelCase = {name for name in dir(HashMap() ) if is_public(name )}
    # dict's public surface must be a strict superset of HashMap's
    assert dict_public_names > hash_public_names
| 82 | 1 |
def lowerCAmelCase__ ( a , n , mod ) ->int:
    '''Compute ``(a ** n) % mod`` by recursive binary exponentiation, O(log n).

    BUG FIXES: the original declared three parameters all named ``a__``
    (SyntaxError), recursed through the undefined name
    ``binary_exponentiation`` (NameError), and halved the exponent with true
    division ``n / 2``, producing floats; ``n // 2`` keeps it integral.

    Args:
        a: base.
        n: non-negative integer exponent.
        mod: modulus.

    Returns:
        ``(a ** n) % mod``.
    '''
    if n == 0:
        return 1
    elif n % 2 == 1:
        # Odd exponent: peel off one factor of a.
        return (lowerCAmelCase__(a , n - 1 , mod ) * a) % mod
    else:
        # Even exponent: square the half-power.
        b = lowerCAmelCase__(a , n // 2 , mod )
        return (b * b) % mod
# Demo: modular division via Fermat's little theorem, b^(p-2) = b^-1 (mod p).
# NOTE(review): names appear machine-mangled — the three constants all rebind
# ``lowerCamelCase__`` while the prints read ``p``, ``a``, ``b`` and call
# ``binary_exponentiation``, none of which are defined under those names
# here. Verify against the upstream script before running.
# a prime number
lowerCamelCase__ = 701
lowerCamelCase__ = 10_0000_0000
lowerCamelCase__ = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 82 | import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
    '''Fast CPU tests (tiny dummy weights) for the Kandinsky 2.2 controlnet
    img2img pipeline.

    NOTE(review): identifiers appear machine-mangled — every class attribute
    rebinds ``__A`` (upstream: pipeline_class, params, batch_params,
    required_optional_params, test_xformers_attention).
    '''
    __A = KandinskyVaaControlnetImgaImgPipeline
    __A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    __A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    __A = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    __A = False
    @property
    def __UpperCAmelCase ( self : List[Any]) -> Tuple:
        """Hidden size of the dummy text/image embedder."""
        return 32
    @property
    def __UpperCAmelCase ( self : Tuple) -> Tuple:
        """Time-embedding input dimension of the dummy UNet."""
        return 32
    @property
    def __UpperCAmelCase ( self : Optional[int]) -> str:
        """Base block output channel count (same as the time input dim)."""
        return self.time_input_dim
    @property
    def __UpperCAmelCase ( self : List[str]) -> Any:
        """Time-projection dimension (4x the time input dim)."""
        return self.time_input_dim * 4
    @property
    def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
        """Cross-attention dimension of the dummy UNet."""
        return 100
    @property
    def __UpperCAmelCase ( self : Dict) -> List[Any]:
        """Build a tiny, seeded UNet2DConditionModel for deterministic tests."""
        torch.manual_seed(0)
        _UpperCamelCase = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        _UpperCamelCase = UNetaDConditionModel(**lowercase_)
        return model
    @property
    def __UpperCAmelCase ( self : int) -> Optional[int]:
        """Constructor kwargs for the tiny VQ image decoder (movq)."""
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }
    @property
    def __UpperCAmelCase ( self : int) -> Dict:
        """Build the tiny, seeded VQModel."""
        torch.manual_seed(0)
        _UpperCamelCase = VQModel(**self.dummy_movq_kwargs)
        return model
    def __UpperCAmelCase ( self : int) -> Any:
        """Assemble the pipeline components: dummy UNet, DDIM scheduler, movq."""
        _UpperCamelCase = self.dummy_unet
        _UpperCamelCase = self.dummy_movq
        _UpperCamelCase = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.0_00_85,
            "beta_end": 0.0_12,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        _UpperCamelCase = DDIMScheduler(**lowercase_)
        _UpperCamelCase = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def __UpperCAmelCase ( self : str , lowercase_ : Dict , lowercase_ : List[str]=0) -> List[str]:
        """Create seeded dummy pipeline inputs: image embeds, a 256x256 init
        image, a depth hint tensor and a generator."""
        _UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase_)).to(lowercase_)
        _UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
            lowercase_)
        # create init_image
        _UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
        _UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
        _UpperCamelCase = Image.fromarray(np.uinta(lowercase_)).convert("RGB").resize((256, 256))
        # create hint
        _UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
        if str(lowercase_).startswith("mps"):
            # MPS does not support device-bound generators
            _UpperCamelCase = torch.manual_seed(lowercase_)
        else:
            _UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(lowercase_)
        _UpperCamelCase = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def __UpperCAmelCase ( self : Any) -> str:
        """End-to-end CPU smoke test: check output shape and a pinned 3x3
        pixel slice for both dict and tuple return paths."""
        _UpperCamelCase = "cpu"
        _UpperCamelCase = self.get_dummy_components()
        _UpperCamelCase = self.pipeline_class(**lowercase_)
        _UpperCamelCase = pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = pipe(**self.get_dummy_inputs(lowercase_))
        _UpperCamelCase = output.images
        _UpperCamelCase = pipe(
            **self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
        _UpperCamelCase = image[0, -3:, -3:, -1]
        _UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _UpperCamelCase = np.array(
            [0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    '''Slow GPU integration test for the Kandinsky 2.2 controlnet-depth
    img2img pipeline against pretrained checkpoints and a reference image.'''
    def __UpperCAmelCase ( self : Union[str, Any]) -> int:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCAmelCase ( self : Optional[int]) -> Any:
        """Run prior + controlnet img2img end-to-end and compare against a
        stored reference output (mean pixel difference)."""
        _UpperCamelCase = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
        _UpperCamelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        _UpperCamelCase = init_image.resize((512, 512))
        _UpperCamelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        # Normalise the hint image to [0, 1] and add a batch dimension (NCHW)
        _UpperCamelCase = torch.from_numpy(np.array(lowercase_)).float() / 2_55.0
        _UpperCamelCase = hint.permute(2 , 0 , 1).unsqueeze(0)
        _UpperCamelCase = "A robot, 4k photo"
        _UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
        pipe_prior.to(lowercase_)
        _UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
        _UpperCamelCase = pipeline.to(lowercase_)
        pipeline.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = torch.Generator(device="cpu").manual_seed(0)
        _UpperCamelCase , _UpperCamelCase = pipe_prior(
            lowercase_ , image=lowercase_ , strength=0.85 , generator=lowercase_ , negative_prompt="" , ).to_tuple()
        _UpperCamelCase = pipeline(
            image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , hint=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
        _UpperCamelCase = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(lowercase_ , lowercase_)
| 82 | 1 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCAmelCase :
    '''Config and input factory for the TF ViT model tests.

    NOTE(review): identifiers appear machine-mangled; upstream this is
    ``TFViTModelTester``.
    '''
    def __init__( self : List[Any] , lowercase_ : str , lowercase_ : int=13 , lowercase_ : str=30 , lowercase_ : Tuple=2 , lowercase_ : Tuple=3 , lowercase_ : Tuple=True , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=32 , lowercase_ : str=2 , lowercase_ : Optional[int]=4 , lowercase_ : Dict=37 , lowercase_ : Optional[int]="gelu" , lowercase_ : Dict=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Tuple=10 , lowercase_ : Dict=0.02 , lowercase_ : Optional[Any]=3 , lowercase_ : str=None , ) -> Union[str, Any]:
        """Store the test hyper-parameters and derive the sequence length."""
        _UpperCamelCase = parent
        _UpperCamelCase = batch_size
        _UpperCamelCase = image_size
        _UpperCamelCase = patch_size
        _UpperCamelCase = num_channels
        _UpperCamelCase = is_training
        _UpperCamelCase = use_labels
        _UpperCamelCase = hidden_size
        _UpperCamelCase = num_hidden_layers
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = intermediate_size
        _UpperCamelCase = hidden_act
        _UpperCamelCase = hidden_dropout_prob
        _UpperCamelCase = attention_probs_dropout_prob
        _UpperCamelCase = type_sequence_label_size
        _UpperCamelCase = initializer_range
        _UpperCamelCase = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        _UpperCamelCase = (image_size // patch_size) ** 2
        _UpperCamelCase = num_patches + 1
    def __UpperCAmelCase ( self : List[str]) -> Tuple:
        """Create a config plus random pixel values (and labels if used)."""
        _UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        _UpperCamelCase = None
        if self.use_labels:
            _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        _UpperCamelCase = self.get_config()
        return config, pixel_values, labels
    def __UpperCAmelCase ( self : Optional[Any]) -> Any:
        """Build a small ViTConfig from the stored hyper-parameters."""
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Any) -> Tuple:
        """Check TFViTModel output shapes, including position-embedding
        interpolation for a smaller input image."""
        _UpperCamelCase = TFViTModel(config=lowercase_)
        _UpperCamelCase = model(lowercase_ , training=lowercase_)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image with different size than the one specified in config.
        _UpperCamelCase = self.image_size // 2
        _UpperCamelCase = pixel_values[:, :, :image_size, :image_size]
        _UpperCamelCase = model(lowercase_ , interpolate_pos_encoding=lowercase_ , training=lowercase_)
        _UpperCamelCase = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size))
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : Dict) -> Union[str, Any]:
        """Check TFViTForImageClassification logits, including interpolated
        position embeddings and single-channel (greyscale) input."""
        _UpperCamelCase = self.type_sequence_label_size
        _UpperCamelCase = TFViTForImageClassification(lowercase_)
        _UpperCamelCase = model(lowercase_ , labels=lowercase_ , training=lowercase_)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # Test with an image with different size than the one specified in config.
        _UpperCamelCase = self.image_size // 2
        _UpperCamelCase = pixel_values[:, :, :image_size, :image_size]
        _UpperCamelCase = model(lowercase_ , interpolate_pos_encoding=lowercase_ , training=lowercase_)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        _UpperCamelCase = 1
        _UpperCamelCase = TFViTForImageClassification(lowercase_)
        _UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        _UpperCamelCase = model(lowercase_)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def __UpperCAmelCase ( self : Any) -> str:
        """Return (config, inputs_dict) as expected by the common test mixin."""
        _UpperCamelCase = self.prepare_config_and_inputs()
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
        _UpperCamelCase = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
    '''Common-mixin test suite for TFViTModel / TFViTForImageClassification.

    NOTE(review): every class attribute rebinds ``__A`` (mangled); upstream
    these are all_model_classes, pipeline_model_mapping and the
    test_resize_embeddings / test_head_masking / test_onnx flags.
    '''
    __A = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    __A = (
        {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    __A = False
    __A = False
    __A = False
    def __UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
        """Create the model tester and config tester fixtures."""
        _UpperCamelCase = TFViTModelTester(self)
        _UpperCamelCase = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37)
    def __UpperCAmelCase ( self : List[str]) -> Tuple:
        """Exercise the standard config round-trip checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def __UpperCAmelCase ( self : Union[str, Any]) -> str:
        """Skipped: ViT consumes pixel values, not input embeddings."""
        pass
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def __UpperCAmelCase ( self : Union[str, Any]) -> Tuple:
        """Skipped: ViT consumes pixel values, not input embeddings."""
        pass
    def __UpperCAmelCase ( self : str) -> List[str]:
        """Input embeddings are a Keras layer; no output embeddings."""
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCamelCase = model_class(lowercase_)
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer))
            _UpperCamelCase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowercase_ , tf.keras.layers.Layer))
    def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
        """The forward signature's first argument must be pixel_values."""
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCamelCase = model_class(lowercase_)
            _UpperCamelCase = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _UpperCamelCase = [*signature.parameters.keys()]
            _UpperCamelCase = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowercase_)
    def __UpperCAmelCase ( self : List[Any]) -> Dict:
        """Run the base-model shape checks."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_)
    def __UpperCAmelCase ( self : Tuple) -> Optional[int]:
        """Run the image-classification head checks."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase_)
    @slow
    def __UpperCAmelCase ( self : List[Any]) -> Tuple:
        """Smoke-test loading the pretrained google/vit-base-patch16-224."""
        _UpperCamelCase = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(lowercase_)
self.assertIsNotNone(lowercase_)
def lowerCAmelCase__ ( ) ->Dict:
    '''Load the standard COCO two-cats fixture image used by the slow tests.'''
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_tf
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
    '''Slow integration test: pretrained TF ViT classifier on a real image.'''
    @cached_property
    def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
        """Image processor matching the pretrained checkpoint (or None
        when vision deps are unavailable)."""
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
    @slow
    def __UpperCAmelCase ( self : int) -> Any:
        """Forward the fixture image and pin the first three logits."""
        _UpperCamelCase = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        _UpperCamelCase = self.default_image_processor
        _UpperCamelCase = prepare_img()
        _UpperCamelCase = image_processor(images=lowercase_ , return_tensors="tf")
        # forward pass
        _UpperCamelCase = model(**lowercase_)
        # verify the logits
        _UpperCamelCase = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape , lowercase_)
        _UpperCamelCase = tf.constant([-0.27_44, 0.82_15, -0.08_36])
        tf.debugging.assert_near(outputs.logits[0, :3] , lowercase_ , atol=1e-4)
def lowerCAmelCase__ ( number_of_steps: int ) ->int:
    '''Count the distinct ways to climb ``number_of_steps`` stairs taking
    1 or 2 steps at a time (the Fibonacci recurrence), iteratively in
    O(n) time and O(1) space.

    BUG FIX: the original parameter was named ``a__`` while the body read
    ``number_of_steps`` — a NameError on every call.

    Raises:
        AssertionError: if the input is not a positive integer (kept as an
            assert for backward compatibility with callers expecting it).
    '''
    assert (
        isinstance(number_of_steps , int ) and number_of_steps > 0
    ), f'number_of_steps needs to be positive integer, your input {number_of_steps}'
    if number_of_steps == 1:
        return 1
    previous , current = 1, 1
    for _ in range(number_of_steps - 1 ):
        current , previous = current + previous, current
    return current
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 82 | 1 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase__ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
    '''Fixture factory for the Pix2Struct image-processor tests: stores test
    dimensions and builds processor kwargs and a reference input image.'''
    def __init__( self : Dict , lowercase_ : Optional[int] , lowercase_ : str=7 , lowercase_ : str=3 , lowercase_ : List[Any]=18 , lowercase_ : Tuple=30 , lowercase_ : Tuple=400 , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=True , lowercase_ : List[Any]=True , lowercase_ : Dict=None , ) -> List[str]:
        """Store the test dimensions and processing flags."""
        _UpperCamelCase = size if size is not None else {"height": 20, "width": 20}
        _UpperCamelCase = parent
        _UpperCamelCase = batch_size
        _UpperCamelCase = num_channels
        _UpperCamelCase = image_size
        _UpperCamelCase = min_resolution
        _UpperCamelCase = max_resolution
        _UpperCamelCase = size
        _UpperCamelCase = do_normalize
        _UpperCamelCase = do_convert_rgb
        # patch-count budgets exercised by the tests
        _UpperCamelCase = [512, 1024, 2048, 4096]
        _UpperCamelCase = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def __UpperCAmelCase ( self : Optional[int]) -> str:
        """Kwargs used to instantiate the image processor under test."""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def __UpperCAmelCase ( self : List[Any]) -> str:
        """Download and return the reference RGB test image."""
        _UpperCamelCase = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        _UpperCamelCase = Image.open(requests.get(lowercase_ , stream=lowercase_).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11, reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''', )
@require_torch
@require_vision
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
    '''Tests for ``Pix2StructImageProcessor`` on 3-channel inputs: attribute
    presence, a known-mean smoke test on a downloaded image, and flattened
    patch shapes for PIL / numpy / torch inputs, batched and unbatched,
    with and without rendered ``header_text``.

    NOTE(review): obfuscated rendition — the mixin base ``lowerCAmelCase``
    is undefined, all test methods share the name ``__UpperCAmelCase``
    (later defs shadow earlier ones), and locals read below
    (``image_inputs``, ``encoded_images``, ``expected_hidden_dim``, ...)
    are never bound because every assignment targets ``_UpperCamelCase``.'''
    # Processor class under test; None when vision deps are missing.
    __A = PixaStructImageProcessor if is_vision_available() else None
    def __UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
        """setUp: build the shared tester helper."""
        _UpperCamelCase = PixaStructImageProcessingTester(self)
    @property
    def __UpperCAmelCase ( self : Optional[Any]) -> int:
        """Kwargs for constructing the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def __UpperCAmelCase ( self : Optional[int]) -> Tuple:
        """The processor exposes its configuration flags as attributes."""
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(lowercase_ , "do_normalize"))
        self.assertTrue(hasattr(lowercase_ , "do_convert_rgb"))
    def __UpperCAmelCase ( self : Optional[int]) -> str:
        """Expected-value smoke test: the mean of the flattened patches of the
        fixture image must match a known constant (0.0606) within 1e-3."""
        _UpperCamelCase = self.image_processor_tester.prepare_dummy_image()
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        _UpperCamelCase = 2048
        _UpperCamelCase = image_processor(lowercase_ , return_tensors="pt" , max_patches=lowercase_)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06) , atol=1e-3 , rtol=1e-3))
    def __UpperCAmelCase ( self : List[Any]) -> Tuple:
        """Shape checks for PIL inputs: each flattened patch row is
        patch_h * patch_w * channels (+2 for the row/col coordinates)."""
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_)
        for image in image_inputs:
            self.assertIsInstance(lowercase_ , Image.Image)
        # Test not batched input
        _UpperCamelCase = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            _UpperCamelCase = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=lowercase_).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _UpperCamelCase = image_processor(
                lowercase_ , return_tensors="pt" , max_patches=lowercase_).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def __UpperCAmelCase ( self : Any) -> Tuple:
        """Same shape checks with VQA-style ``header_text``: calling without
        header_text must raise, with header_text shapes are unchanged."""
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_)
        for image in image_inputs:
            self.assertIsInstance(lowercase_ , Image.Image)
        # Test not batched input
        _UpperCamelCase = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        _UpperCamelCase = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(lowercase_):
                _UpperCamelCase = image_processor(
                    image_inputs[0] , return_tensors="pt" , max_patches=lowercase_).flattened_patches
            _UpperCamelCase = "Hello"
            _UpperCamelCase = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=lowercase_ , header_text=lowercase_).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _UpperCamelCase = image_processor(
                lowercase_ , return_tensors="pt" , max_patches=lowercase_ , header_text=lowercase_).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
        """Shape checks for numpy-array inputs."""
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_)
        for image in image_inputs:
            self.assertIsInstance(lowercase_ , np.ndarray)
        _UpperCamelCase = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            _UpperCamelCase = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=lowercase_).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _UpperCamelCase = image_processor(
                lowercase_ , return_tensors="pt" , max_patches=lowercase_).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def __UpperCAmelCase ( self : List[str]) -> Tuple:
        """Shape checks for torch-tensor inputs."""
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_)
        for image in image_inputs:
            self.assertIsInstance(lowercase_ , torch.Tensor)
        # Test not batched input
        _UpperCamelCase = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            _UpperCamelCase = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=lowercase_).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _UpperCamelCase = image_processor(
                lowercase_ , return_tensors="pt" , max_patches=lowercase_).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11, reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''', )
@require_torch
@require_vision
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
    '''Tests for ``Pix2StructImageProcessor`` with 4-channel (RGBA) inputs.
    The expected hidden dim uses ``num_channels - 1`` because RGBA images
    are converted down to RGB before patching.

    NOTE(review): same obfuscation caveats as the 3-channel class above —
    undefined mixin base ``lowerCAmelCase``, shadowed method names, and
    unbound locals (assignments target ``_UpperCamelCase``).'''
    # Processor class under test; None when vision deps are missing.
    __A = PixaStructImageProcessor if is_vision_available() else None
    def __UpperCAmelCase ( self : str) -> Optional[Any]:
        """setUp: build a 4-channel tester (expected_encoded_image_num_channels = 3)."""
        _UpperCamelCase = PixaStructImageProcessingTester(self , num_channels=4)
        _UpperCamelCase = 3
    @property
    def __UpperCAmelCase ( self : Optional[int]) -> Union[str, Any]:
        """Kwargs for constructing the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def __UpperCAmelCase ( self : str) -> List[Any]:
        """The processor exposes its configuration flags as attributes."""
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(lowercase_ , "do_normalize"))
        self.assertTrue(hasattr(lowercase_ , "do_convert_rgb"))
    def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
        """Shape checks for RGBA PIL inputs: per-patch row length uses
        (num_channels - 1) since the alpha channel is dropped."""
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_)
        for image in image_inputs:
            self.assertIsInstance(lowercase_ , Image.Image)
        # Test not batched input
        _UpperCamelCase = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            _UpperCamelCase = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=lowercase_).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _UpperCamelCase = image_processor(
                lowercase_ , return_tensors="pt" , max_patches=lowercase_).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    '''Zero-shot object-detection pipeline (PyTorch only): scores free-text
    candidate labels against an image and returns scored bounding boxes.

    NOTE(review): obfuscated rendition of ``ZeroShotObjectDetectionPipeline``
    — the base class ``lowerCAmelCase`` is undefined, every method shares
    the name ``__UpperCAmelCase`` (later defs shadow earlier ones), and
    several names are read without being bound (``kwargs``, ``image``,
    ``candidate_labels``, ``postprocess_params``, ``results``, ...).
    Compare with the upstream implementation before relying on it.'''
    def __init__( self : Union[str, Any] , **lowercase_ : Tuple) -> Any:
        """Reject the TF framework, require the vision backend, and restrict
        the model type to the zero-shot object-detection mapping."""
        super().__init__(**lowercase_)
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self , "vision")
        self.check_model_type(lowercase_)
    def __call__( self : str , lowercase_ : Union[str, "Image.Image", List[Dict[str, Any]]] , lowercase_ : Union[str, List[str]] = None , **lowercase_ : str , ) -> List[str]:
        """Normalise (image, candidate_labels) into a single dict payload and
        delegate to the base pipeline's __call__.
        NOTE(review): duplicate ``lowercase_`` parameters are a SyntaxError;
        ``kwargs``/``image``/``candidate_labels`` below are unbound."""
        if "text_queries" in kwargs:
            # Backwards-compat alias: `text_queries` -> candidate labels.
            _UpperCamelCase = kwargs.pop("text_queries")
        if isinstance(lowercase_ , (str, Image.Image)):
            _UpperCamelCase = {"image": image, "candidate_labels": candidate_labels}
        else:
            _UpperCamelCase = image
        _UpperCamelCase = super().__call__(lowercase_ , **lowercase_)
        return results
    def __UpperCAmelCase ( self : Any , **lowercase_ : int) -> List[str]:
        """Split caller kwargs into (preprocess, forward, postprocess) params;
        only `threshold` and `top_k` are postprocess options.
        NOTE(review): ``postprocess_params`` is never bound — the dict
        writes above all target ``_UpperCamelCase``, so this raises
        NameError as written."""
        _UpperCamelCase = {}
        if "threshold" in kwargs:
            _UpperCamelCase = kwargs["threshold"]
        if "top_k" in kwargs:
            _UpperCamelCase = kwargs["top_k"]
        return {}, {}, postprocess_params
    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> List[str]:
        """Preprocess: load the image, split comma-separated labels, and yield
        one (tokenized label + image features) model input per label, with
        `is_last` marking the final one and `target_size` = (H, W)."""
        _UpperCamelCase = load_image(inputs["image"])
        _UpperCamelCase = inputs["candidate_labels"]
        if isinstance(lowercase_ , lowercase_):
            _UpperCamelCase = candidate_labels.split(",")
        # NOTE(review): ``torch.intaa`` is not a real dtype name (garbled
        # by the obfuscation; presumably an integer dtype) — confirm.
        _UpperCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
        for i, candidate_label in enumerate(lowercase_):
            _UpperCamelCase = self.tokenizer(lowercase_ , return_tensors=self.framework)
            _UpperCamelCase = self.image_processor(lowercase_ , return_tensors=self.framework)
            yield {
                "is_last": i == len(lowercase_) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def __UpperCAmelCase ( self : Dict , lowercase_ : Tuple) -> str:
        """Forward: pop the bookkeeping keys, run the model, and re-attach
        target_size / candidate_label / is_last to the outputs."""
        _UpperCamelCase = model_inputs.pop("target_size")
        _UpperCamelCase = model_inputs.pop("candidate_label")
        _UpperCamelCase = model_inputs.pop("is_last")
        _UpperCamelCase = self.model(**lowercase_)
        _UpperCamelCase = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def __UpperCAmelCase ( self : int , lowercase_ : Tuple , lowercase_ : List[str]=0.1 , lowercase_ : int=None) -> List[str]:
        """Postprocess: run object-detection postprocessing per label, collect
        {score, label, box} dicts, sort by descending score, truncate to top_k."""
        _UpperCamelCase = []
        for model_output in model_outputs:
            _UpperCamelCase = model_output["candidate_label"]
            _UpperCamelCase = BaseModelOutput(lowercase_)
            _UpperCamelCase = self.image_processor.post_process_object_detection(
                outputs=lowercase_ , threshold=lowercase_ , target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                _UpperCamelCase = outputs["scores"][index].item()
                _UpperCamelCase = self._get_bounding_box(outputs["boxes"][index][0])
                _UpperCamelCase = {"score": score, "label": label, "box": box}
                results.append(lowercase_)
        # NOTE(review): the lambda's parameter is named ``lowercase_`` but its
        # body reads ``x`` — should be ``key=lambda x: x["score"]``.
        _UpperCamelCase = sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)
        if top_k:
            _UpperCamelCase = results[:top_k]
        return results
    def __UpperCAmelCase ( self : str , lowercase_ : "torch.Tensor") -> Dict[str, int]:
        """Convert an [xmin, ymin, xmax, ymax] tensor into a plain dict of ints."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = box.int().tolist()
        _UpperCamelCase = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCamelCase__ = '''sshleifer/bart-tiny-random'''
lowerCamelCase__ = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    '''Tests for ``create_student_by_copying_alternating_layers``: verifies the
    student model is built with the requested encoder/decoder layer counts
    for BART- and T5-style teachers, and that invalid arguments raise.

    NOTE(review): obfuscated — all test methods share the name
    ``__UpperCAmelCase`` (later defs shadow earlier ones), ``student`` is
    read but never bound (assignments target ``_UpperCamelCase``), and the
    pretrained-name argument ``lowercase_`` is undefined at class scope.'''
    @cached_property
    def __UpperCAmelCase ( self : List[Any]) -> Any:
        """teacher_config: fetch the teacher model's AutoConfig (cached)."""
        return AutoConfig.from_pretrained(lowercase_)
    def __UpperCAmelCase ( self : str) -> Dict:
        """A 1-encoder/1-decoder student reports num_hidden_layers == 1."""
        _UpperCamelCase , *_UpperCamelCase = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=1)
        self.assertEqual(student.config.num_hidden_layers , 1)
    def __UpperCAmelCase ( self : int) -> Any:
        """Smoke test: building a student with d=None must not raise."""
        _UpperCamelCase , *_UpperCamelCase = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=lowercase_)
    def __UpperCAmelCase ( self : Optional[Any]) -> List[Any]:
        """With d unspecified the decoder keeps the teacher's depth."""
        _UpperCamelCase , *_UpperCamelCase = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=lowercase_)
        self.assertEqual(student.config.encoder_layers , 1)
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
    def __UpperCAmelCase ( self : List[str]) -> List[str]:
        """Explicit e=1, d=1 yields a 1/1-layer student."""
        _UpperCamelCase , *_UpperCamelCase = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=1)
        self.assertEqual(student.config.encoder_layers , 1)
        self.assertEqual(student.config.decoder_layers , 1)
    def __UpperCAmelCase ( self : List[Any]) -> int:
        """Invalid layer arguments must raise."""
        with self.assertRaises(lowercase_):
            create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=lowercase_ , d=lowercase_)
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _UpperCAmelCase ( lowerCAmelCase ):
    '''Repository lint checks over every ``datasets/**/*.py`` script:
    (1) every ``open(...)`` call must pass an explicit ``encoding`` (or use a
    binary mode), and (2) no bare ``print(...)`` statements are allowed —
    scripts must use the datasets logger instead.

    NOTE(review): obfuscated — the base class ``lowerCAmelCase`` is
    undefined, helper/test methods all share the name ``__UpperCAmelCase``,
    and locals (``regexp``, ``matches``, ``dataset_files``) are read without
    being bound since assignments target ``_UpperCamelCase``.'''
    def __UpperCAmelCase ( self : List[str] , lowercase_ : str) -> str:
        """Return a regex match if the file contains an ``open(...)`` call
        without one of the whitelisted encoding/binary-mode tokens nearby."""
        with open(lowercase_ , encoding="utf-8") as input_file:
            _UpperCamelCase = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            _UpperCamelCase = input_file.read()
            _UpperCamelCase = regexp.search(lowercase_)
            return match
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : str) -> int:
        """Return the first real ``print(`` occurrence in the file, ignoring
        occurrences inside comments, string literals and docstrings."""
        with open(lowercase_ , encoding="utf-8") as input_file:
            _UpperCamelCase = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL)
            _UpperCamelCase = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            _UpperCamelCase = regexp.finditer(lowercase_)
            _UpperCamelCase = [match for match in matches if match is not None and match.group(1) is not None]
            return matches[0] if matches else None
    def __UpperCAmelCase ( self : int) -> int:
        """Fail if any dataset script opens a file without utf-8 encoding."""
        _UpperCamelCase = Path("./datasets")
        _UpperCamelCase = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(lowercase_)):
                raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')
    def __UpperCAmelCase ( self : str) -> str:
        """Fail if any dataset script contains a bare print statement."""
        _UpperCamelCase = Path("./datasets")
        _UpperCamelCase = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(lowercase_)):
                raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _UpperCAmelCase :
    '''Model tester for LayoutLMv3: builds tiny configs/inputs (text tokens,
    bounding boxes, pixel values) and runs create-and-check helpers for the
    base model and its sequence/token-classification and QA heads.

    NOTE(review): obfuscated rendition — every parameter is named
    ``lowercase_`` (duplicate argument names are a SyntaxError), every
    assignment targets ``_UpperCamelCase``, so the attributes and locals
    read below (``parent``, ``bbox``, ``config``, ``result``, ...) are
    never actually bound.  Comments describe the evident intent.'''
    def __init__( self : Tuple , lowercase_ : str , lowercase_ : List[Any]=2 , lowercase_ : List[Any]=3 , lowercase_ : Union[str, Any]=4 , lowercase_ : Union[str, Any]=2 , lowercase_ : Tuple=7 , lowercase_ : int=True , lowercase_ : Tuple=True , lowercase_ : List[str]=True , lowercase_ : List[str]=True , lowercase_ : Dict=99 , lowercase_ : List[str]=36 , lowercase_ : List[Any]=3 , lowercase_ : int=4 , lowercase_ : List[Any]=37 , lowercase_ : Dict="gelu" , lowercase_ : int=0.1 , lowercase_ : Any=0.1 , lowercase_ : str=512 , lowercase_ : List[str]=16 , lowercase_ : Dict=2 , lowercase_ : int=0.02 , lowercase_ : Dict=6 , lowercase_ : str=6 , lowercase_ : List[Any]=3 , lowercase_ : List[str]=4 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=1000 , ) -> Union[str, Any]:
        """Store all sizing/config knobs and derive the total sequence length."""
        _UpperCamelCase = parent
        _UpperCamelCase = batch_size
        _UpperCamelCase = num_channels
        _UpperCamelCase = image_size
        _UpperCamelCase = patch_size
        _UpperCamelCase = text_seq_length
        _UpperCamelCase = is_training
        _UpperCamelCase = use_input_mask
        _UpperCamelCase = use_token_type_ids
        _UpperCamelCase = use_labels
        _UpperCamelCase = vocab_size
        _UpperCamelCase = hidden_size
        _UpperCamelCase = num_hidden_layers
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = intermediate_size
        _UpperCamelCase = hidden_act
        _UpperCamelCase = hidden_dropout_prob
        _UpperCamelCase = attention_probs_dropout_prob
        _UpperCamelCase = max_position_embeddings
        _UpperCamelCase = type_vocab_size
        _UpperCamelCase = type_sequence_label_size
        _UpperCamelCase = initializer_range
        _UpperCamelCase = coordinate_size
        _UpperCamelCase = shape_size
        _UpperCamelCase = num_labels
        _UpperCamelCase = num_choices
        _UpperCamelCase = scope
        _UpperCamelCase = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        _UpperCamelCase = text_seq_length
        _UpperCamelCase = (image_size // patch_size) ** 2 + 1
        _UpperCamelCase = self.text_seq_length + self.image_seq_length
    def __UpperCAmelCase ( self : Tuple) -> List[Any]:
        """Build (config, input_ids, bbox, pixel_values, token_type_ids,
        input_mask, sequence_labels, token_labels) with legal bboxes
        (x0 <= x1, y0 <= y1 enforced by swapping)."""
        _UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
        _UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    # swap y-coordinates so y0 <= y1
                    _UpperCamelCase = bbox[i, j, 3]
                    _UpperCamelCase = bbox[i, j, 1]
                    _UpperCamelCase = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    # swap x-coordinates so x0 <= x1
                    _UpperCamelCase = bbox[i, j, 2]
                    _UpperCamelCase = bbox[i, j, 0]
                    _UpperCamelCase = t
        _UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        _UpperCamelCase = None
        if self.use_input_mask:
            _UpperCamelCase = random_attention_mask([self.batch_size, self.text_seq_length])
        _UpperCamelCase = None
        if self.use_token_type_ids:
            _UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
        _UpperCamelCase = None
        _UpperCamelCase = None
        if self.use_labels:
            _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            _UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
        _UpperCamelCase = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Tuple) -> Union[str, Any]:
        """Base model: check last_hidden_state shapes for text+image,
        text-only and image-only forward passes."""
        _UpperCamelCase = LayoutLMvaModel(config=lowercase_)
        model.to(lowercase_)
        model.eval()
        # text + image
        _UpperCamelCase = model(lowercase_ , pixel_values=lowercase_)
        _UpperCamelCase = model(
            lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_)
        _UpperCamelCase = model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , token_type_ids=lowercase_)
        _UpperCamelCase = model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        _UpperCamelCase = model(lowercase_)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        _UpperCamelCase = model(pixel_values=lowercase_)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : Tuple) -> Optional[Any]:
        """Sequence-classification head: logits are (batch, num_labels)."""
        _UpperCamelCase = self.num_labels
        _UpperCamelCase = LayoutLMvaForSequenceClassification(lowercase_)
        model.to(lowercase_)
        model.eval()
        _UpperCamelCase = model(
            lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def __UpperCAmelCase ( self : int , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : str , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]) -> List[str]:
        """Token-classification head: logits are (batch, text_seq_length, num_labels)."""
        _UpperCamelCase = self.num_labels
        _UpperCamelCase = LayoutLMvaForTokenClassification(config=lowercase_)
        model.to(lowercase_)
        model.eval()
        _UpperCamelCase = model(
            lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))
    def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Any , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any]) -> int:
        """QA head: start/end logits are (batch, seq_length)."""
        _UpperCamelCase = LayoutLMvaForQuestionAnswering(config=lowercase_)
        model.to(lowercase_)
        model.eval()
        _UpperCamelCase = model(
            lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
        """Repackage prepare_config_and_inputs() into (config, inputs_dict)
        for the common model tests."""
        _UpperCamelCase = self.prepare_config_and_inputs()
        (
            (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) ,
        ) = config_and_inputs
        _UpperCamelCase = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
    '''Common test suite for LayoutLMv3 models: config tests, model/head
    create-and-check tests across position-embedding types, and a slow
    pretrained-loading smoke test.

    NOTE(review): obfuscated — both mixin bases are the undefined name
    ``lowerCAmelCase``, the three ``__A = False`` flags have lost their
    original attribute names, all test methods share the name
    ``__UpperCAmelCase``, and locals (``config_and_inputs``,
    ``inputs_dict``) are read without being bound.'''
    # NOTE(review): original attribute names for these flags (e.g.
    # test_pruning / test_torchscript / test_mismatched_shapes) were lost.
    __A = False
    __A = False
    __A = False
    __A = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    __A = (
        {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def __UpperCAmelCase ( self : str , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : int , lowercase_ : Any) -> Tuple:
        """Pipeline-test filter hook: skip nothing (always True)."""
        return True
    def __UpperCAmelCase ( self : int) -> List[str]:
        """setUp: build the model tester and a ConfigTester."""
        _UpperCamelCase = LayoutLMvaModelTester(self)
        _UpperCamelCase = ConfigTester(self , config_class=lowercase_ , hidden_size=37)
    def __UpperCAmelCase ( self : str , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Tuple=False) -> List[str]:
        """Adapt a shared inputs_dict for a given model class: expand inputs
        for multiple-choice models and synthesize labels per head type."""
        _UpperCamelCase = copy.deepcopy(lowercase_)
        if model_class in get_values(lowercase_):
            # Multiple-choice: tile each tensor across the choices dim.
            _UpperCamelCase = {
                k: v.unsqueeze(1).expand(-1 , self.model_tester.num_choices , -1).contiguous()
                if isinstance(lowercase_ , torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(lowercase_):
                _UpperCamelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
            elif model_class in get_values(lowercase_):
                # QA heads: start and end positions.
                _UpperCamelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
                _UpperCamelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
            elif model_class in [
                *get_values(lowercase_),
            ]:
                _UpperCamelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
            elif model_class in [
                *get_values(lowercase_),
            ]:
                # Token classification: one label per text token.
                _UpperCamelCase = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowercase_ , )
        return inputs_dict
    def __UpperCAmelCase ( self : str) -> Optional[Any]:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()
    def __UpperCAmelCase ( self : Optional[Any]) -> int:
        """Base model create-and-check."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_)
    def __UpperCAmelCase ( self : List[str]) -> Dict:
        """Base model across all position-embedding types."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _UpperCamelCase = type
            self.model_tester.create_and_check_model(*lowercase_)
    def __UpperCAmelCase ( self : Union[str, Any]) -> str:
        """Sequence-classification head create-and-check."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowercase_)
    def __UpperCAmelCase ( self : int) -> Dict:
        """Token-classification head create-and-check."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase_)
    def __UpperCAmelCase ( self : List[str]) -> List[Any]:
        """QA head create-and-check."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowercase_)
    @slow
    def __UpperCAmelCase ( self : int) -> Tuple:
        """Slow: loading the first pretrained checkpoint succeeds."""
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCamelCase = LayoutLMvaModel.from_pretrained(lowercase_)
            self.assertIsNotNone(lowercase_)
def lowerCAmelCase__ ( ) ->int:
    '''Load and return the COCO cats fixture image used by the integration
    tests below.

    Returns:
        The PIL image loaded from the test-fixtures directory (the
        declared ``->int`` annotation is an obfuscation artifact).

    Fix: the original bound the opened image to ``_UpperCamelCase`` but
    returned the never-defined name ``image`` (NameError); bind the result
    to the name that is actually returned.
    '''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    '''Slow integration test: run the pretrained ``microsoft/layoutlmv3-base``
    model on the COCO fixture image with tiny text/bbox inputs and compare
    the first 3x3 slice of the last hidden state against known values.

    NOTE(review): obfuscated — locals (``model``, ``image_processor``,
    ``pixel_values``, ``input_ids``, ``bbox``, ``outputs``,
    ``expected_shape``, ``expected_slice``) are read without being bound
    since every assignment targets ``_UpperCamelCase``.'''
    @cached_property
    def __UpperCAmelCase ( self : Dict) -> List[str]:
        """default_image_processor: OCR-disabled LayoutLMv3 processor (or None)."""
        return LayoutLMvaImageProcessor(apply_ocr=lowercase_) if is_vision_available() else None
    @slow
    def __UpperCAmelCase ( self : str) -> Tuple:
        """Forward pass on real weights; check output shape (1, 199, 768)
        and the top-left 3x3 hidden-state slice to 1e-4."""
        _UpperCamelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(lowercase_)
        _UpperCamelCase = self.default_image_processor
        _UpperCamelCase = prepare_img()
        _UpperCamelCase = image_processor(images=lowercase_ , return_tensors="pt").pixel_values.to(lowercase_)
        _UpperCamelCase = torch.tensor([[1, 2]])
        _UpperCamelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        _UpperCamelCase = model(
            input_ids=input_ids.to(lowercase_) , bbox=bbox.to(lowercase_) , pixel_values=pixel_values.to(lowercase_) , )
        # verify the logits
        _UpperCamelCase = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape , lowercase_)
        _UpperCamelCase = torch.tensor(
            [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]]).to(lowercase_)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4))
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase :
    '''Conversation state container for the conversational pipeline: a UUID,
    the history of past user inputs and generated responses, and the
    pending (unprocessed) user input.

    NOTE(review): obfuscated rendition of ``transformers`` ``Conversation``
    — every parameter is named ``lowercase_`` (duplicate argument names are
    a SyntaxError), ``uuid.uuida()`` is a garbled ``uuid.uuid4()``, and
    attributes read below (``self.uuid``, ``self.new_user_input``,
    ``self.past_user_inputs``, ``self.generated_responses``) are never
    actually bound because assignments target ``_UpperCamelCase``.'''
    def __init__( self : List[Any] , lowercase_ : str = None , lowercase_ : uuid.UUID = None , lowercase_ : List[Any]=None , lowercase_ : int=None) -> Dict:
        """Initialise with optional text, id and history lists (defaulting the
        history lists to fresh empty lists, not shared defaults)."""
        if not conversation_id:
            _UpperCamelCase = uuid.uuida()
        if past_user_inputs is None:
            _UpperCamelCase = []
        if generated_responses is None:
            _UpperCamelCase = []
        _UpperCamelCase = conversation_id
        _UpperCamelCase = past_user_inputs
        _UpperCamelCase = generated_responses
        _UpperCamelCase = text
    def __eq__( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
        """Two conversations are equal if they share a UUID, or failing that,
        if their pending input and full histories match."""
        if not isinstance(lowercase_ , lowercase_):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )
    def __UpperCAmelCase ( self : List[Any] , lowercase_ : str , lowercase_ : bool = False) -> Any:
        """add_user_input: queue new text; if unprocessed input already exists,
        either overwrite it (with a warning) or ignore the new text."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".')
                _UpperCamelCase = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            _UpperCamelCase = text
    def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
        """mark_processed: move the pending input into the history and clear it."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        _UpperCamelCase = None
    def __UpperCAmelCase ( self : Dict , lowercase_ : str) -> Optional[Any]:
        """append_response: record a model-generated reply."""
        self.generated_responses.append(lowercase_)
    def __UpperCAmelCase ( self : List[Any]) -> Optional[int]:
        """iter_texts: yield (is_user, text) pairs, interleaving history and
        ending with any pending user input."""
        for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input
    def __repr__( self : Union[str, Any]) -> int:
        """Render the conversation id and each turn as 'user/bot >> text' lines."""
        _UpperCamelCase = f'Conversation id: {self.uuid} \n'
        for is_user, text in self.iter_texts():
            _UpperCamelCase = "user" if is_user else "bot"
            output += f'{name} >> {text} \n'
        return output
@add_end_docstrings(
    lowerCAmelCase, R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''', )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Multi-turn conversational pipeline.

    NOTE(review): identifiers below were restored from the otherwise-undefined
    names this block itself referenced (``forward_params``, ``generate_kwargs``,
    ``conversation`` ...). The previous text recycled a single placeholder name,
    producing duplicate-parameter SyntaxErrors in five defs, method-name
    shadowing (all hooks shared one name), and NameErrors throughout.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Generation needs a pad token; fall back to EOS when none is configured.
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        """Split pipeline kwargs into preprocess / forward / postprocess dicts."""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        """Run the pipeline; unwrap the list when a single Conversation was given."""
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        """Tokenize the conversation history into framework-specific model inputs."""
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. '
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        """Generate a reply, left-trimming history so `minimum_tokens` remain for it."""
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            # Typo fixed: "is to long" -> "is too long".
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})')
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1  # skip only the decoder start token
        else:
            start_position = n  # decoder-only models echo the prompt; strip it
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        """Decode generated ids and fold the answer back into the Conversation."""
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        """Fallback: encode each turn joined by EOS, truncating from the left."""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 82 | 1 |
from sklearn.metrics import fa_score
import datasets
# Constant names restored: the metric class below references _DESCRIPTION,
# _KWARGS_DESCRIPTION and _CITATION, but all three strings were bound to one
# recycled name, leaving the first two clobbered and all three undefined.
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    """F1 metric backed by scikit-learn's ``f1_score``.

    NOTE(review): ``_info``/``_compute`` names restored (both hooks shared one
    obfuscated name, so the second shadowed the first, and ``datasets.Metric``
    dispatches on exactly these names). ``_compute`` parameters restored — the
    previous signature repeated one parameter name, a SyntaxError.
    """

    def _info(self):
        """Declare metric metadata and input features (multilabel gets sequences)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """Compute F1; returns a scalar float, or an array when `average` is None."""
        # sklearn's signature is f1_score(y_true, y_pred, ...): references first.
        score = fa_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight)
        return {"f1": float(score) if score.size == 1 else score}
def solution(length: int = 50) -> int:
    """Project Euler 117: count the ways a row of `length` unit squares can be
    tiled with any mix of red (2), green (3) and blue (4) tiles plus blanks.

    Uses the recurrence ways[n] = 1 + sum over tile placements of ways of the
    remaining suffix. Renamed to ``solution`` — the ``__main__`` guard below
    calls it by that name. (Also strips dataset-dump junk fused onto the def line.)
    """
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
    # Prints e.g. "solution() = 100808458960497" via f-string debug syntax (3.8+).
    # NOTE(review): `solution` must exist at module level under that name.
    print(F"{solution() = }")
| 82 | 1 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
# Names restored: the __main__ block below raises Exception(usage_doc), and the
# shuffle call on the next line referenced an undefined `choice` (both values
# were previously bound to one recycled placeholder name).
usage_doc = '''Usage of script: script_name <size_of_canvas:int>'''
# Weighted pool: ~9% live cells when sampled from this list.
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    """Return a `size` x `size` grid of dead (False) cells.

    Name restored: ``run`` and the ``__main__`` block call ``create_canvas``.
    """
    canvas = [[False for _ in range(size)] for _ in range(size)]
    return canvas
def seed(canvas: list[list[bool]]) -> None:
    """Randomize `canvas` in place, each cell becoming a uniform random bool.

    Fixes: the cell assignment had lost its target (``canvas[i][j] = ...``), so
    the function was a no-op, and the inner loop enumerated the whole canvas
    instead of the current row. Name restored: ``__main__`` calls ``seed(c)``.
    """
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advance the Game of Life one generation and return the new grid.

    Fixes: the write into the next generation had lost its target
    (``next_gen_canvas[r][c] = ...``) and the inner loop enumerated the whole
    canvas rather than the row. Helper names (``create_canvas``,
    ``__judge_point``) restored to match their defs in this module.
    """
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            # 3x3 neighbourhood slice; numpy clamps the edges automatically.
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    """Apply Conway's rules to one cell given its 3x3 neighbourhood (centre included).

    Fixes: the previous signature repeated the same parameter name (a
    SyntaxError) while the body referenced ``pt`` and ``neighbours``; name
    restored to match the call in ``run``.
    """
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False  # underpopulation
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False  # overpopulation
    else:
        if alive == 3:
            state = True  # reproduction
    return state
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    # Names restored: every assignment below had been collapsed onto one
    # placeholder while later lines read canvas_size/c/fig/ax/cmap.
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['''w''', '''k'''])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 82 | import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download `num_class_images` LAION images matching `class_prompt` into
    `class_data_dir`, recording captions, URLs and local paths.

    NOTE(review): names restored from the undefined references in the body
    (``num_class_images``, ``class_data_dir``, ``img`` ...); the previous
    signature repeated one parameter name (SyntaxError) and the three metadata
    files were all bound to a single handle.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(f'{class_data_dir}/images' , exist_ok=True )
    # Already downloaded enough? Nothing to do.
    if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
        return
    # Grow the query size until enough candidates are returned (capped at 1e4).
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images" , total=num_class_images )
    with open(f'{class_data_dir}/caption.txt' , "w" ) as fa, open(f'{class_data_dir}/urls.txt' , "w" ) as fa_urls, open(
        f'{class_data_dir}/images.txt' , "w" ) as fa_paths:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"] )
                if img.status_code == 200:
                    # Validates the payload decodes as an image before keeping it.
                    _ = Image.open(BytesIO(img.content ) )
                    with open(f'{class_data_dir}/images/{total}.jpg' , "wb" ) as f:
                        f.write(img.content )
                    fa.write(images["caption"] + "\n" )
                    fa_urls.write(images["url"] + "\n" )
                    fa_paths.write(f'{class_data_dir}/images/{total}.jpg' + "\n" )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                # Best-effort download loop: skip unreachable/broken entries.
                continue
    return
def parse_args():
    """Parse CLI arguments for the class-image retrieval script.

    NOTE(review): the previous body referenced an undefined ``a__`` for
    ``add_help``/``required``/``type``; values restored to the conventional
    ones (help disabled, string args required, count defaulting to 200).
    """
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    # Restored target: the next line reads `args`, which was previously
    # bound to a recycled placeholder name.
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 82 | 1 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """Convert a BigBird TF checkpoint to a PyTorch model and save it.

    Names restored: the ``__main__`` block calls this function by this name and
    passes (tf path, config path, dump path, trivia-qa flag) in this order; the
    previous signature repeated one parameter name (SyntaxError) and the body
    referenced undefined ``config``/``model``/``pytorch_dump_path``.
    """
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f'Building PyTorch model from configuration: {config}' )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # Restored targets: `parser` and `args` were referenced but their
    # assignments had been collapsed onto one placeholder name.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--big_bird_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained BERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
| 82 | import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
# Constant names restored: the metric class below references _CITATION,
# _DESCRIPTION and _KWARGS_DESCRIPTION, but all three strings were bound to a
# single recycled name.
_CITATION = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
_DESCRIPTION = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
_KWARGS_DESCRIPTION = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    """Accuracy metric for the MATH dataset, with LaTeX canonicalization.

    NOTE(review): ``_info``/``_compute`` names restored — both hooks shared one
    obfuscated name (the second shadowed the first) and ``datasets.Metric``
    dispatches on these exact names; ``_compute`` locals restored from the
    undefined ``n_correct``/``accuracy`` references.
    """

    def _info(self):
        """Declare metric metadata: both inputs are plain strings."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )

    def _compute(self, predictions, references):
        """Fraction of predictions equivalent to their reference after canonicalization."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 82 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _UpperCAmelCase ( lowerCAmelCase ):
    """Dataset of tokenized sequences for LM distillation: splits over-long
    sequences, drops near-empty and unknown-heavy ones, and pads batches.

    NOTE(review): ``__init__`` calls ``self.check`` / ``self.remove_*`` /
    ``self.print_statistics`` by name, but every method was obfuscated to one
    shared name (so only the last survived); names and lost assignment targets
    (``self.params``, ``self.token_ids``, ``self.lengths`` and method locals)
    restored from the in-class references. ``batch_sequences`` is presumably
    the external collate_fn name — verify against callers.
    """

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Invariant: one length per sequence, and lengths match the arrays."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than the model limit into bounded chunks,
        re-adding the cls/sep (or bos/eos) sentinels at the chunk edges."""
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(f'Splitting {sum(idxs)} too long sequences.')

        def divide_chunks(l, n):
            # Consecutive slices of size n (last one may be shorter).
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # max_len - 2 leaves room for the two sentinel tokens per chunk.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(s) for s in sub_seqs])
        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.')

    def remove_unknown_sequences(self):
        """Drop sequences where at least half of the tokens are <unk>."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).')

    def print_statistics(self):
        """Log dataset size on the master process only."""
        if not self.params.is_master:
            return
        logger.info(f'{len(self)} sequences')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Collate ``(token_ids, length)`` pairs: right-pad to the batch max and
        return ``(ids tensor, lengths tensor)``."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)
        # Max for paddings
        max_seq_len_ = max(lengths)
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)
        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 82 | import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
# Constant names restored: benchmark_iterating below reads SPEED_TEST_N_EXAMPLES,
# SMALL_TEST and RESULTS_FILE_PATH, and the os.path lines referenced undefined
# RESULTS_BASEPATH/RESULTS_FILENAME (all previously bound to one recycled name).
SPEED_TEST_N_EXAMPLES = 5_0000
SMALL_TEST = 5000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def read(dataset, length):
    """Benchmark: fetch `length` examples one at a time by integer index.

    Name/params restored: the previous signature repeated one parameter name
    (SyntaxError), and benchmark_iterating references ``read``.
    """
    for i in range(length):
        _ = dataset[i]
@get_duration
def read_batch(dataset, length, batch_size):
    """Benchmark: slice the whole dataset in `batch_size` chunks.

    Name/params restored (benchmark_iterating references ``read_batch`` and
    passes ``length``/``batch_size`` keywords); `length` is accepted for the
    common kwargs shape even though the loop runs over the full dataset.
    """
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset, length, type):
    """Benchmark: single-example reads under an output format (numpy/torch/...).

    `type` intentionally shadows the builtin: it must match the kwargs key
    ("type") used by benchmark_iterating's function table.
    """
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]
@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    """Benchmark: batched slicing under an output format.

    Name/params restored; `type` matches the kwargs key used by
    benchmark_iterating's function table.
    """
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    """Generate a synthetic dataset, time each access pattern before and after a
    shuffle, and dump the timings to RESULTS_FILE_PATH as JSON.

    NOTE(review): `times`/`functions`/`features`/`dataset` targets restored from
    the undefined references in the body; the timing-dict key format is a
    plausible reconstruction — verify against project history.
    """
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset" )
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow" ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={"list": (100,)} , )
        print("first set of iterations" )
        for func, kwargs in functions:
            print(func.__name__ , str(kwargs ) )
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset" )
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling" )
        for func, kwargs in functions_shuffled:
            print("shuffled " , func.__name__ , str(kwargs ) )
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset , **kwargs )
    with open(RESULTS_FILE_PATH , "wb" ) as f:
        f.write(json.dumps(times ).encode("utf-8" ) )


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 82 | 1 |
def lowerCAmelCase__ ( a__ ) ->bool:
    """Return True if the directed graph `a__` (adjacency dict) has a cycle.

    Fixes: the DFS call previously passed the graph for all four arguments and
    iterated an undefined name ``graph``; the two set targets had been lost.
    """
    _UpperCamelCase = set()
    visited = _UpperCamelCase
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(a__ , node , visited , rec_stk )
        for node in a__ )
def depth_first_search(graph, vertex, visited, rec_stk) -> bool:
    """DFS from `vertex`; True when a back edge (node already on the recursion
    stack) is found, i.e. the graph contains a cycle.

    Fixes: the previous signature repeated one parameter name (SyntaxError)
    while the body referenced graph/vertex/visited/rec_stk; def name restored
    to match the call site above.
    """
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            # Back edge to an ancestor on the current path: cycle found.
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
    from doctest import testmod
    # Runs doctests for this module; note the functions above currently
    # define no doctest examples, so this is effectively a no-op.
    testmod()
| 82 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
    """Fast KarrasVePipeline test with a tiny randomly-initialized UNet.

    NOTE(review): local names (model/pipe/image/...) restored from the
    undefined references in the bodies; the property name is mandated by the
    ``self.dummy_uncond_unet`` read below.
    """

    @property
    def dummy_uncond_unet(self):
        """Deterministic tiny UNet for fast tests."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model

    def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
        """Dict-output and tuple-output paths must agree and match the expected slice."""
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet , scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=True)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2 , generator=generator , output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2 , generator=generator , output_type="numpy" , return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow end-to-end KarrasVePipeline test against a pretrained 256px model.

    NOTE(review): local names restored from the undefined references the body
    previously contained (model_id/pipe/image/...).
    """

    def __UpperCAmelCase ( self : int) -> Tuple:
        """Full 20-step sampling must reproduce the pinned image slice."""
        model_id = "google/ncsnpp-celebahq-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model , scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=True)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20 , generator=generator , output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82 | 1 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """Minimal Lightning wrapper: a base model plus a 2-label QA output head.

    Name restored: the conversion function below instantiates
    ``LightningModel(...)`` and reads ``.model`` / ``.qa_outputs``; the class
    and those attribute assignments had lost their names.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2  # start/end logits for extractive QA
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
        """Forward pass intentionally unimplemented: only the weights are used."""
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    """Transfer weights from a Lightning QA checkpoint into a
    LongformerForQuestionAnswering model and save it.

    Names restored: the ``__main__`` block calls this function by this name with
    (model id, ckpt path, dump dir); the previous signature repeated one
    parameter name (SyntaxError) and the body referenced undefined
    ``lightning_model``/``longformer_for_qa``.
    """
    model = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(model)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
    # Restored targets: `parser` and `args` were referenced but their
    # assignments had been collapsed onto one placeholder name.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--longformer_model''',
        default=None,
        type=str,
        required=True,
        help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
    )
    parser.add_argument(
        '''--longformer_question_answering_ckpt_path''',
        default=None,
        type=str,
        required=True,
        help='''Path the official PyTorch Lightning Checkpoint.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 82 | import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
    """Pipeline tests for the fill-mask task (PyTorch and TensorFlow backends).

    NOTE(review): this chunk looks machine-transformed — every method is named
    ``__UpperCAmelCase`` (later definitions shadow earlier ones after name
    mangling), several signatures repeat the parameter name ``lowercase_``
    (a SyntaxError: duplicate argument in function definition), and helpers
    such as ``run_test_top_k`` / ``run_large_test`` are called but never
    defined under those names here. Confirm against the upstream test file
    before relying on this code.
    """
    # NOTE(review): the second assignment overwrites the first.
    __A = MODEL_FOR_MASKED_LM_MAPPING
    __A = TF_MODEL_FOR_MASKED_LM_MAPPING
    def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
        """Release GPU memory after each test (tearDown in the upstream source)."""
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch
            torch.cuda.empty_cache()
    @require_tf
    def __UpperCAmelCase ( self : Tuple) -> List[Any]:
        """Smoke-test the TensorFlow fill-mask pipeline on a tiny random model."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
        _UpperCamelCase = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 25506, "token_str": " accuser"},
            ] , )
        _UpperCamelCase = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-0_5,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-0_5,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ] , )
        _UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
            ] , )
    @require_torch
    def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
        """Smoke-test the PyTorch fill-mask pipeline on a tiny random model."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
        _UpperCamelCase = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
            ] , )
        _UpperCamelCase = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-0_5,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
            ] , )
        _UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
            ] , )
        _UpperCamelCase = unmasker("My name is <mask> <mask>" , top_k=2)
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                [
                    {
                        "score": 2.2e-0_5,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-0_5,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ] , )
    @require_torch_gpu
    def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
        """fp16 model outputs must be cast back to float32 for postprocessing."""
        _UpperCamelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
        # convert model to fp16
        pipe.model.half()
        _UpperCamelCase = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(lowercase_ , lowercase_)
    @slow
    @require_torch
    def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
        """Slow test: full distilroberta-base model, PyTorch backend."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
        self.run_large_test(lowercase_)
    @slow
    @require_tf
    def __UpperCAmelCase ( self : List[str]) -> List[str]:
        """Slow test: full distilroberta-base model, TensorFlow backend."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
        self.run_large_test(lowercase_)
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int) -> Any:
        """Shared assertions for the slow large-model tests above."""
        _UpperCamelCase = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_) , [
                {"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
            ] , )
        _UpperCamelCase = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_) , [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.2_51,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.2_14,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ] , )
        _UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
        self.assertEqual(
            nested_simplify(lowercase_) , [
                {"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
            ] , )
    @require_torch
    def __UpperCAmelCase ( self : Union[str, Any]) -> str:
        """Run the common pipeline test-suite with the PyTorch backend."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
        _UpperCamelCase = None
        _UpperCamelCase = None
        self.run_pipeline_test(lowercase_ , [])
    @require_tf
    def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
        """Run the common pipeline test-suite with the TensorFlow backend."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
        _UpperCamelCase = None
        _UpperCamelCase = None
        self.run_pipeline_test(lowercase_ , [])
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int]) -> int:
        """Build a FillMaskPipeline plus example inputs for the common test-suite."""
        # NOTE(review): reads `tokenizer`, `fill_masker`, `examples`, which the
        # obfuscated `_UpperCamelCase` assignments above no longer define.
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        _UpperCamelCase = [
            f'This is another {tokenizer.mask_token} test',
        ]
        return fill_masker, examples
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> str:
        """Exercise the pipeline on single, batched and invalid inputs."""
        _UpperCamelCase = fill_masker.tokenizer
        _UpperCamelCase = fill_masker.model
        _UpperCamelCase = fill_masker(
            f'This is a {tokenizer.mask_token}' , )
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        _UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}'])
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        _UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
        self.assertEqual(
            lowercase_ , [
                [
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                ],
                [
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                ],
            ] , )
        with self.assertRaises(lowercase_):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(lowercase_):
            fill_masker("This is")
        self.run_test_top_k(lowercase_ , lowercase_)
        self.run_test_targets(lowercase_ , lowercase_)
        self.run_test_top_k_targets(lowercase_ , lowercase_)
        self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_)
        self.fill_mask_with_multiple_masks(lowercase_ , lowercase_)
    def __UpperCAmelCase ( self : int , lowercase_ : Dict , lowercase_ : List[str]) -> Union[str, Any]:
        """Check the `targets` argument (pipeline-level and call-level)."""
        _UpperCamelCase = tokenizer.get_vocab()
        _UpperCamelCase = sorted(vocab.keys())[:2]
        # Pipeline argument
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , targets=lowercase_)
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        _UpperCamelCase = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs} , lowercase_)
        _UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
        # Call argument
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        _UpperCamelCase = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs} , lowercase_)
        _UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
        # Score equivalence
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
        _UpperCamelCase = [top_mask["token_str"] for top_mask in outputs]
        _UpperCamelCase = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(lowercase_) == set(lowercase_):
            _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
            _UpperCamelCase = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
        # Raises with invalid
        with self.assertRaises(lowercase_):
            _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(lowercase_):
                _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""])
        with self.assertRaises(lowercase_):
            _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets="")
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : List[str]) -> Any:
        """Check the `top_k` argument (pipeline-level and call-level)."""
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , top_k=2)
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2)
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
    def __UpperCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple) -> Union[str, Any]:
        """Check `top_k` combined with `targets`."""
        _UpperCamelCase = tokenizer.get_vocab()
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        # top_k=2, ntargets=3
        _UpperCamelCase = sorted(vocab.keys())[:3]
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=lowercase_)
        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        # NOTE(review): the lambda parameter is named `lowercase_` but the body
        # reads `x` — NameError when sorted() invokes the key function.
        _UpperCamelCase = [el["token_str"] for el in sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(lowercase_).issubset(lowercase_):
            _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=lowercase_)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
    def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[str]) -> Tuple:
        """Duplicate targets must not inflate the number of outputs."""
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        _UpperCamelCase = tokenizer.get_vocab()
        # String duplicates + id duplicates
        _UpperCamelCase = sorted(vocab.keys())[:3]
        _UpperCamelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        _UpperCamelCase = fill_masker(f'My name is {tokenizer.mask_token}' , targets=lowercase_ , top_k=10)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(lowercase_) , 3)
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Any) -> Dict:
        """Multiple mask tokens yield one result list per mask."""
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        _UpperCamelCase = fill_masker(
            f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2)
        self.assertEqual(
            lowercase_ , [
                [
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                ],
                [
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                ],
                [
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                ],
            ] , )
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
    """Pipeline tests for the fill-mask task (duplicate copy of the class above).

    NOTE(review): this chunk looks machine-transformed — every method is named
    ``__UpperCAmelCase`` (later definitions shadow earlier ones after name
    mangling), several signatures repeat the parameter name ``lowercase_``
    (a SyntaxError: duplicate argument in function definition), and helpers
    such as ``run_test_top_k`` / ``run_large_test`` are called but never
    defined under those names here. Confirm against the upstream test file
    before relying on this code.
    """
    # NOTE(review): the second assignment overwrites the first.
    __A = MODEL_FOR_MASKED_LM_MAPPING
    __A = TF_MODEL_FOR_MASKED_LM_MAPPING
    def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
        """Release GPU memory after each test (tearDown in the upstream source)."""
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch
            torch.cuda.empty_cache()
    @require_tf
    def __UpperCAmelCase ( self : Tuple) -> List[Any]:
        """Smoke-test the TensorFlow fill-mask pipeline on a tiny random model."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
        _UpperCamelCase = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 25506, "token_str": " accuser"},
            ] , )
        _UpperCamelCase = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-0_5,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-0_5,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ] , )
        _UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
            ] , )
    @require_torch
    def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
        """Smoke-test the PyTorch fill-mask pipeline on a tiny random model."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
        _UpperCamelCase = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
            ] , )
        _UpperCamelCase = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-0_5,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
            ] , )
        _UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
            ] , )
        _UpperCamelCase = unmasker("My name is <mask> <mask>" , top_k=2)
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                [
                    {
                        "score": 2.2e-0_5,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-0_5,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ] , )
    @require_torch_gpu
    def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
        """fp16 model outputs must be cast back to float32 for postprocessing."""
        _UpperCamelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
        # convert model to fp16
        pipe.model.half()
        _UpperCamelCase = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(lowercase_ , lowercase_)
    @slow
    @require_torch
    def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
        """Slow test: full distilroberta-base model, PyTorch backend."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
        self.run_large_test(lowercase_)
    @slow
    @require_tf
    def __UpperCAmelCase ( self : List[str]) -> List[str]:
        """Slow test: full distilroberta-base model, TensorFlow backend."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
        self.run_large_test(lowercase_)
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int) -> Any:
        """Shared assertions for the slow large-model tests above."""
        _UpperCamelCase = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_) , [
                {"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
            ] , )
        _UpperCamelCase = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_) , [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.2_51,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.2_14,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ] , )
        _UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
        self.assertEqual(
            nested_simplify(lowercase_) , [
                {"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
            ] , )
    @require_torch
    def __UpperCAmelCase ( self : Union[str, Any]) -> str:
        """Run the common pipeline test-suite with the PyTorch backend."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
        _UpperCamelCase = None
        _UpperCamelCase = None
        self.run_pipeline_test(lowercase_ , [])
    @require_tf
    def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
        """Run the common pipeline test-suite with the TensorFlow backend."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
        _UpperCamelCase = None
        _UpperCamelCase = None
        self.run_pipeline_test(lowercase_ , [])
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int]) -> int:
        """Build a FillMaskPipeline plus example inputs for the common test-suite."""
        # NOTE(review): reads `tokenizer`, `fill_masker`, `examples`, which the
        # obfuscated `_UpperCamelCase` assignments above no longer define.
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        _UpperCamelCase = [
            f'This is another {tokenizer.mask_token} test',
        ]
        return fill_masker, examples
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> str:
        """Exercise the pipeline on single, batched and invalid inputs."""
        _UpperCamelCase = fill_masker.tokenizer
        _UpperCamelCase = fill_masker.model
        _UpperCamelCase = fill_masker(
            f'This is a {tokenizer.mask_token}' , )
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        _UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}'])
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        _UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
        self.assertEqual(
            lowercase_ , [
                [
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                ],
                [
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                ],
            ] , )
        with self.assertRaises(lowercase_):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(lowercase_):
            fill_masker("This is")
        self.run_test_top_k(lowercase_ , lowercase_)
        self.run_test_targets(lowercase_ , lowercase_)
        self.run_test_top_k_targets(lowercase_ , lowercase_)
        self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_)
        self.fill_mask_with_multiple_masks(lowercase_ , lowercase_)
    def __UpperCAmelCase ( self : int , lowercase_ : Dict , lowercase_ : List[str]) -> Union[str, Any]:
        """Check the `targets` argument (pipeline-level and call-level)."""
        _UpperCamelCase = tokenizer.get_vocab()
        _UpperCamelCase = sorted(vocab.keys())[:2]
        # Pipeline argument
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , targets=lowercase_)
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        _UpperCamelCase = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs} , lowercase_)
        _UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
        # Call argument
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        _UpperCamelCase = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs} , lowercase_)
        _UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
        # Score equivalence
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
        _UpperCamelCase = [top_mask["token_str"] for top_mask in outputs]
        _UpperCamelCase = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(lowercase_) == set(lowercase_):
            _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
            _UpperCamelCase = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
        # Raises with invalid
        with self.assertRaises(lowercase_):
            _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(lowercase_):
                _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""])
        with self.assertRaises(lowercase_):
            _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets="")
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : List[str]) -> Any:
        """Check the `top_k` argument (pipeline-level and call-level)."""
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , top_k=2)
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2)
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
    def __UpperCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple) -> Union[str, Any]:
        """Check `top_k` combined with `targets`."""
        _UpperCamelCase = tokenizer.get_vocab()
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        # top_k=2, ntargets=3
        _UpperCamelCase = sorted(vocab.keys())[:3]
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=lowercase_)
        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        # NOTE(review): the lambda parameter is named `lowercase_` but the body
        # reads `x` — NameError when sorted() invokes the key function.
        _UpperCamelCase = [el["token_str"] for el in sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(lowercase_).issubset(lowercase_):
            _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=lowercase_)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
    def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[str]) -> Tuple:
        """Duplicate targets must not inflate the number of outputs."""
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        _UpperCamelCase = tokenizer.get_vocab()
        # String duplicates + id duplicates
        _UpperCamelCase = sorted(vocab.keys())[:3]
        _UpperCamelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        _UpperCamelCase = fill_masker(f'My name is {tokenizer.mask_token}' , targets=lowercase_ , top_k=10)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(lowercase_) , 3)
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Any) -> Dict:
        """Multiple mask tokens yield one result list per mask."""
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        _UpperCamelCase = fill_masker(
            f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2)
        self.assertEqual(
            lowercase_ , [
                [
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                ],
                [
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                ],
                [
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                ],
            ] , )
lowerCamelCase__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def lowerCAmelCase__ ( a__ ) ->int:
    """Evaluate a fully parenthesised infix expression with Dijkstra's
    two-stack algorithm.

    Only single-digit operands and the operators ``+ - * /`` are supported,
    and every binary operation must be wrapped in parentheses.

    The original body assigned every value to ``_UpperCamelCase`` while the
    following statements referenced unbound names (``operand_stack``,
    ``operator_stack``, ``opr``); coherent locals are restored, and plain
    lists replace the project ``Stack`` class (identical push/peek/pop
    behaviour).

    >>> lowerCAmelCase__("(5 + ((4 * 2) * (2 + 3)))")
    45
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = []
    operator_stack = []
    for token in a__:
        if token.isdigit():
            # RULE 1: push operands
            operand_stack.append(int(token))
        elif token in operators:
            # RULE 2: push operators
            operator_stack.append(token)
        elif token == ")":
            # RULE 4: pop one operator and two operands, apply, push result.
            # The second pop is the LEFT operand (matters for - and /).
            opr = operator_stack.pop()
            right_operand = operand_stack.pop()
            left_operand = operand_stack.pop()
            total = operators[opr](left_operand , right_operand)
            operand_stack.append(total)
    # RULE 5: the single value left on the operand stack is the answer
    return operand_stack[-1]
if __name__ == "__main__":
    # Demo: evaluate a sample expression with the module's evaluator.
    # The upstream name ``dijkstras_two_stack_algorithm`` does not exist in
    # this file — the evaluator is ``lowerCAmelCase__`` — and the original
    # print statement also referenced an unbound ``equation``; both fixed.
    lowerCamelCase__ = '''(5 + ((4 * 2) * (2 + 3)))'''
    # answer = 45
    print(F"{lowerCamelCase__} = {lowerCAmelCase__(lowerCamelCase__)}")
| 82 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Make CUDA/cuDNN deterministic so the image-slice comparisons below are reproducible.
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
    '''Fast CPU unit tests for ``StableDiffusionInstructPixaPixPipeline`` built
    from tiny randomly-initialised components.

    NOTE(review): identifiers in this file appear machine-mangled — several
    signatures declare ``lowercase_`` twice (a SyntaxError) and every local is
    assigned to ``_UpperCamelCase``, so names referenced afterwards
    (``scheduler``, ``vae``, ``unet``, ``sd_pipe``, ``image``, ``inputs`` ...)
    are never bound.  Comments describe the apparent intent only; confirm
    against the upstream diffusers test module before relying on them.
    '''
    __A = StableDiffusionInstructPixaPixPipeline
    # Parameter sets consumed by the shared tester mixins.
    __A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
    __A = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    __A = IMAGE_TO_IMAGE_IMAGE_PARAMS
    __A = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def __UpperCAmelCase ( self : Dict) -> str:
        """Build a dict of tiny components (UNet, scheduler, VAE, text encoder,
        tokenizer) for a fast test pipeline."""
        torch.manual_seed(0)
        _UpperCamelCase = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        _UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase_)
        torch.manual_seed(0)
        _UpperCamelCase = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0)
        _UpperCamelCase = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        _UpperCamelCase = CLIPTextModel(lowercase_)
        _UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # presumably the locals above were unet/scheduler/vae/text_encoder/tokenizer — TODO confirm
        _UpperCamelCase = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def __UpperCAmelCase ( self : Any , lowercase_ : Any , lowercase_ : List[Any]=0) -> List[Any]:
        """Build a deterministic kwargs dict (prompt, 32x32 PIL image, seeded
        generator) for a pipeline call."""
        _UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_)).to(lowercase_)
        _UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
        _UpperCamelCase = Image.fromarray(np.uinta(lowercase_)).convert("RGB")
        # MPS needs a CPU-side seeded generator; other devices use a device generator.
        if str(lowercase_).startswith("mps"):
            _UpperCamelCase = torch.manual_seed(lowercase_)
        else:
            _UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(lowercase_)
        _UpperCamelCase = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def __UpperCAmelCase ( self : Optional[Any]) -> Any:
        """Default 2-step run: output shape and a fixed 3x3 corner slice must
        match the recorded reference values."""
        _UpperCamelCase = "cpu"  # ensure determinism for the device-dependent torch.Generator
        _UpperCamelCase = self.get_dummy_components()
        _UpperCamelCase = StableDiffusionInstructPixaPixPipeline(**lowercase_)
        _UpperCamelCase = sd_pipe.to(lowercase_)
        sd_pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = self.get_dummy_inputs(lowercase_)
        _UpperCamelCase = sd_pipe(**lowercase_).images
        _UpperCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _UpperCamelCase = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def __UpperCAmelCase ( self : Optional[int]) -> Union[str, Any]:
        """Same run with a negative prompt; slice must match its own reference."""
        _UpperCamelCase = "cpu"  # ensure determinism for the device-dependent torch.Generator
        _UpperCamelCase = self.get_dummy_components()
        _UpperCamelCase = StableDiffusionInstructPixaPixPipeline(**lowercase_)
        _UpperCamelCase = sd_pipe.to(lowercase_)
        sd_pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = self.get_dummy_inputs(lowercase_)
        _UpperCamelCase = "french fries"
        _UpperCamelCase = sd_pipe(**lowercase_ , negative_prompt=lowercase_)
        _UpperCamelCase = output.images
        _UpperCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _UpperCamelCase = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def __UpperCAmelCase ( self : Dict) -> Any:
        """Batched run: duplicate the prompt and the init image (batch of 2) and
        check the second sample's corner slice."""
        _UpperCamelCase = "cpu"  # ensure determinism for the device-dependent torch.Generator
        _UpperCamelCase = self.get_dummy_components()
        _UpperCamelCase = StableDiffusionInstructPixaPixPipeline(**lowercase_)
        _UpperCamelCase = sd_pipe.to(lowercase_)
        sd_pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = self.get_dummy_inputs(lowercase_)
        _UpperCamelCase = [inputs["prompt"]] * 2
        # Rebuild the image tensor in [0, 1], NCHW, batch of 2.
        _UpperCamelCase = np.array(inputs["image"]).astype(np.floataa) / 2_55.0
        _UpperCamelCase = torch.from_numpy(lowercase_).unsqueeze(0).to(lowercase_)
        _UpperCamelCase = image / 2 + 0.5
        _UpperCamelCase = image.permute(0 , 3 , 1 , 2)
        _UpperCamelCase = image.repeat(2 , 1 , 1 , 1)
        _UpperCamelCase = sd_pipe(**lowercase_).images
        _UpperCamelCase = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        _UpperCamelCase = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def __UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
        """Swap in the Euler-ancestral scheduler and compare against its
        reference slice (values printed for debugging)."""
        _UpperCamelCase = "cpu"  # ensure determinism for the device-dependent torch.Generator
        _UpperCamelCase = self.get_dummy_components()
        _UpperCamelCase = EulerAncestralDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear")
        _UpperCamelCase = StableDiffusionInstructPixaPixPipeline(**lowercase_)
        _UpperCamelCase = sd_pipe.to(lowercase_)
        sd_pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = self.get_dummy_inputs(lowercase_)
        _UpperCamelCase = sd_pipe(**lowercase_).images
        _UpperCamelCase = image[0, -3:, -3:, -1]
        _UpperCamelCase = [round(lowercase_ , 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(lowercase_) for x in slice]))
        assert image.shape == (1, 32, 32, 3)
        _UpperCamelCase = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def __UpperCAmelCase ( self : Dict) -> Tuple:
        """Delegate to the mixin's batch-vs-single consistency check with a
        relaxed tolerance."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def __UpperCAmelCase ( self : Optional[Any]) -> Dict:
        """Passing pre-encoded VAE latents as the image input must produce the
        same output as passing the image itself."""
        _UpperCamelCase = self.get_dummy_components()
        _UpperCamelCase = StableDiffusionInstructPixaPixPipeline(**lowercase_)
        _UpperCamelCase = VaeImageProcessor(do_resize=lowercase_ , do_normalize=lowercase_)
        _UpperCamelCase = pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = pipe(**self.get_dummy_inputs_by_type(lowercase_ , input_image_type="pt"))[0]
        _UpperCamelCase = components["vae"]
        _UpperCamelCase = self.get_dummy_inputs_by_type(lowercase_ , input_image_type="pt")
        # Replace each image-typed input with its VAE latent encoding.
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                _UpperCamelCase = vae.encode(inputs[image_param]).latent_dist.mode()
        _UpperCamelCase = pipe(**lowercase_)[0]
        _UpperCamelCase = np.abs(out - out_latents_inputs).max()
        self.assertLess(lowercase_ , 1e-4 , "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    '''Slow GPU integration tests against the real
    ``timbrooks/instruct-pix2pix`` checkpoint.

    NOTE(review): as in the fast-test class above, locals are mangled to
    ``_UpperCamelCase`` and later statements reference never-bound names
    (``pipe``, ``image``, ``image_slice``, ``inputs`` ...).  Comments record
    the apparent intent; verify against the upstream diffusers tests.
    '''
    def __UpperCAmelCase ( self : str) -> List[Any]:
        """Per-test teardown: free Python and CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCAmelCase ( self : List[str] , lowercase_ : Tuple=0) -> Optional[Any]:
        """Build deterministic call kwargs using a hosted example image."""
        _UpperCamelCase = torch.manual_seed(lowercase_)
        _UpperCamelCase = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg")
        _UpperCamelCase = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def __UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
        """Default scheduler: 512x512 output, corner slice vs reference."""
        _UpperCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix" , safety_checker=lowercase_)
        pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        pipe.enable_attention_slicing()
        _UpperCamelCase = self.get_inputs()
        _UpperCamelCase = pipe(**lowercase_).images
        _UpperCamelCase = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        _UpperCamelCase = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def __UpperCAmelCase ( self : int) -> Any:
        """Same run with the LMS discrete scheduler swapped in."""
        _UpperCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix" , safety_checker=lowercase_)
        _UpperCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        pipe.enable_attention_slicing()
        _UpperCamelCase = self.get_inputs()
        _UpperCamelCase = pipe(**lowercase_).images
        _UpperCamelCase = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        _UpperCamelCase = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def __UpperCAmelCase ( self : Dict) -> Optional[Any]:
        """Same run with the DDIM scheduler swapped in."""
        _UpperCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix" , safety_checker=lowercase_)
        _UpperCamelCase = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        pipe.enable_attention_slicing()
        _UpperCamelCase = self.get_inputs()
        _UpperCamelCase = pipe(**lowercase_).images
        _UpperCamelCase = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        _UpperCamelCase = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def __UpperCAmelCase ( self : Dict) -> List[str]:
        """Step-callback test: verify the intermediate latents at steps 1 and 2
        and that the callback fires once per inference step."""
        _UpperCamelCase = 0
        def callback_fn(lowercase_ : int , lowercase_ : int , lowercase_ : torch.FloatTensor) -> None:
            _UpperCamelCase = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                _UpperCamelCase = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                _UpperCamelCase = latents[0, -3:, -3:, -1]
                _UpperCamelCase = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                _UpperCamelCase = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                _UpperCamelCase = latents[0, -3:, -3:, -1]
                _UpperCamelCase = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
        _UpperCamelCase = False
        _UpperCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix" , safety_checker=lowercase_ , torch_dtype=torch.floataa)
        _UpperCamelCase = pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        pipe.enable_attention_slicing()
        _UpperCamelCase = self.get_inputs()
        pipe(**lowercase_ , callback=lowercase_ , callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def __UpperCAmelCase ( self : Tuple) -> int:
        """Sequential CPU offload + attention slicing must keep peak GPU memory
        under ~2.2 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        _UpperCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix" , safety_checker=lowercase_ , torch_dtype=torch.floataa)
        _UpperCamelCase = pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        _UpperCamelCase = self.get_inputs()
        _UpperCamelCase = pipe(**lowercase_)
        _UpperCamelCase = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def __UpperCAmelCase ( self : List[Any]) -> Dict:
        """Non-multiple-of-32 input: a 504x504 image (divisible by 8 only) must
        come back at the same resolution with the expected pixel slice."""
        _UpperCamelCase = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        _UpperCamelCase = inputs["image"].resize((504, 504))
        _UpperCamelCase = "timbrooks/instruct-pix2pix"
        _UpperCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            lowercase_ , safety_checker=lowercase_ , )
        pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        pipe.enable_attention_slicing()
        _UpperCamelCase = pipe(**lowercase_)
        _UpperCamelCase = output.images[0]
        _UpperCamelCase = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        _UpperCamelCase = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
import logging
from transformers import PretrainedConfig
# Module logger and the pretrained-config URL map for BertAbs.
# NOTE(review): both values are bound to the same mangled name
# ``lowerCamelCase__`` — the second assignment shadows the first.
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
    '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _UpperCAmelCase ( lowerCAmelCase ):
    '''Configuration for the BertAbs summarisation model (encoder/decoder
    sizes, head counts, feed-forward widths and dropouts).

    NOTE(review): the ``__init__`` signature declares ``lowercase_`` for every
    parameter (duplicate names are a SyntaxError) and the body reads unbound
    names (``vocab_size``, ``max_pos``, ``enc_layers`` ...), so the intended
    parameter list below is reconstructed from the attribute assignments —
    confirm against the original file.
    '''
    __A = '''bertabs'''
    def __init__( self : List[str] , lowercase_ : int=30522 , lowercase_ : str=512 , lowercase_ : int=6 , lowercase_ : Optional[Any]=512 , lowercase_ : Optional[Any]=8 , lowercase_ : Optional[int]=512 , lowercase_ : Tuple=0.2 , lowercase_ : Union[str, Any]=6 , lowercase_ : List[Any]=768 , lowercase_ : List[str]=8 , lowercase_ : int=2048 , lowercase_ : Tuple=0.2 , **lowercase_ : str , ) -> Union[str, Any]:
        """Store vocab size, max positions, and encoder/decoder hyperparameters."""
        super().__init__(**lowercase_)
        _UpperCamelCase = vocab_size
        _UpperCamelCase = max_pos
        _UpperCamelCase = enc_layers
        _UpperCamelCase = enc_hidden_size
        _UpperCamelCase = enc_heads
        _UpperCamelCase = enc_ff_size
        _UpperCamelCase = enc_dropout
        _UpperCamelCase = dec_layers
        _UpperCamelCase = dec_hidden_size
        _UpperCamelCase = dec_heads
        _UpperCamelCase = dec_ff_size
        _UpperCamelCase = dec_dropout
| 82 | 1 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Make CUDA/cuDNN deterministic so tensor comparisons below are reproducible.
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
    '''Unit tests for ``VQModel`` via the shared model-tester mixins.

    NOTE(review): locals are mangled to ``_UpperCamelCase`` and several method
    bodies reference names that are never bound (``batch_size``,
    ``num_channels``, ``model``, ``loading_info``, ``image``, ``output`` ...).
    Comments record the apparent intent; confirm against the upstream
    diffusers test file.
    '''
    __A = VQModel
    # Name of the main model output checked by the mixins.
    __A = '''sample'''
    @property
    def __UpperCAmelCase ( self : int , lowercase_ : Any=(32, 32)) -> List[str]:
        """Random (1, 3, 32, 32) input batch for the model."""
        _UpperCamelCase = 4
        _UpperCamelCase = 3
        _UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes).to(lowercase_)
        return {"sample": image}
    @property
    def __UpperCAmelCase ( self : List[str]) -> Optional[int]:
        """Expected input shape (C, H, W)."""
        return (3, 32, 32)
    @property
    def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
        """Expected output shape (C, H, W)."""
        return (3, 32, 32)
    def __UpperCAmelCase ( self : str) -> Tuple:
        """Tiny VQModel constructor kwargs plus matching dummy inputs."""
        _UpperCamelCase = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        _UpperCamelCase = self.dummy_input
        return init_dict, inputs_dict
    def __UpperCAmelCase ( self : int) -> List[Any]:
        """Intentionally skipped mixin hook (not applicable to VQModel)."""
        pass
    def __UpperCAmelCase ( self : Any) -> Optional[int]:
        """Intentionally skipped mixin hook (not applicable to VQModel)."""
        pass
    def __UpperCAmelCase ( self : Dict) -> List[str]:
        """Loading the hub dummy checkpoint must report no missing keys and
        produce a non-None forward output."""
        _UpperCamelCase , _UpperCamelCase = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=lowercase_)
        self.assertIsNotNone(lowercase_)
        self.assertEqual(len(loading_info["missing_keys"]) , 0)
        model.to(lowercase_)
        _UpperCamelCase = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"
    def __UpperCAmelCase ( self : Dict) -> Optional[Any]:
        """Seeded forward pass through the dummy checkpoint must reproduce the
        recorded output slice."""
        _UpperCamelCase = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(lowercase_).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        _UpperCamelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size)
        _UpperCamelCase = image.to(lowercase_)
        with torch.no_grad():
            _UpperCamelCase = model(lowercase_).sample
        _UpperCamelCase = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        _UpperCamelCase = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43])
        # fmt: on
        self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3))
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
    # Download a page's OpenGraph preview image to a timestamped .jpg file.
    # The original assigned every value to the same mangled name
    # ``lowerCamelCase__`` while later lines referenced unbound names
    # (``url``, ``image_url``, ``image_data``, ``file_name``); coherent
    # locals are restored here.
    url = input('''Enter image url: ''').strip()
    print(F"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, '''html.parser''')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
    image_data = requests.get(image_url).content
    # Timestamped file name, e.g. 2024-01-31_12:00:00.jpg
    file_name = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, '''wb''') as fp:
        fp.write(image_data)
    print(F"Done. Image saved to disk as {file_name}.")
| 82 | 1 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
# NOTE(review): all four constants are bound to the same mangled name
# ``lowerCamelCase__`` (each assignment shadows the previous one), and the
# final list references unbound VECTOR_1/VECTOR_2/VECTOR_3 names — the
# upstream file named these vectors individually.  Confirm before running.
lowerCamelCase__ = numpy.array([0, 0])
lowerCamelCase__ = numpy.array([0.5, 0.8660254])
lowerCamelCase__ = numpy.array([1, 0])
lowerCamelCase__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCAmelCase__ ( a__ , steps ) ->list[numpy.ndarray]:
    """Apply ``steps`` Koch-snowflake subdivision rounds to the polyline ``a__``.

    The original signature declared ``a__`` twice (a SyntaxError) and the body
    referenced an unbound ``vectors`` name; both are repaired here.
    NOTE(review): ``iteration_step`` is not defined under that name anywhere
    in this chunk (the helper was also renamed during obfuscation) — confirm
    the helper's real name before running.
    """
    vectors = a__
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors
def lowerCAmelCase__ ( a__ ) ->list[numpy.ndarray]:
    """Perform one Koch-snowflake subdivision pass over the polyline ``a__``.

    Each segment (start, end) becomes four segments: the first third, an
    outward equilateral bump (the third-vector rotated by 60 degrees), and
    the last third.  The original body referenced an unbound ``vectors`` name
    and appended the whole input list instead of the segment start; both are
    repaired here.
    NOTE(review): ``rotate`` is not defined under that name in this chunk —
    confirm the helper's real name before running.
    """
    new_vectors = []
    for i, start_vector in enumerate(a__[:-1] ):
        end_vector = a__[i + 1]
        # keep the segment's start point
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(a__[-1] )
    return new_vectors
def lowerCAmelCase__ ( a__ , angle_in_degrees ) ->numpy.ndarray:
    """Rotate the 2-D vector ``a__`` counter-clockwise by ``angle_in_degrees``.

    Uses the standard rotation matrix ((cos, -sin), (sin, cos)).  The original
    signature declared ``a__`` twice (a SyntaxError); the second parameter is
    restored as the rotation angle.
    """
    theta = numpy.radians(angle_in_degrees )
    c , s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , a__ )
def lowerCAmelCase__ ( a__ ) ->None:
    """Plot the polyline described by ``a__`` (a list of 2-D vectors) with an
    equal-aspect axis and show the figure.

    The original body assigned the axes object to ``_UpperCamelCase`` and then
    referenced unbound ``axes``/coordinate names; coherent locals restored.
    """
    axes = plt.gca()
    axes.set_aspect("equal" )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates , y_coordinates = zip(*a__ )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): ``iterate``, ``INITIAL_VECTORS``, ``plot`` and
    # ``processed_vectors`` are not defined under these names anywhere in this
    # file (every definition was renamed to ``lowerCAmelCase__`` /
    # ``lowerCamelCase__`` during obfuscation), so this demo raises NameError
    # as written — restore the upstream names to run it.
    lowerCamelCase__ = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( lowerCAmelCase ):
    '''Configuration for DPR encoder/reader models (BERT-style hyperparameters
    plus an output ``projection_dim``).

    NOTE(review): the ``__init__`` signature declares ``lowercase_`` for every
    parameter (duplicate names are a SyntaxError) and the body reads unbound
    names (``vocab_size``, ``hidden_size`` ...); the intended parameter list
    is implied by the attribute assignments below — confirm against the
    original transformers file.
    '''
    __A = '''dpr'''
    def __init__( self : Optional[Any] , lowercase_ : int=30522 , lowercase_ : str=768 , lowercase_ : List[Any]=12 , lowercase_ : Dict=12 , lowercase_ : str=3072 , lowercase_ : Any="gelu" , lowercase_ : Any=0.1 , lowercase_ : Any=0.1 , lowercase_ : str=512 , lowercase_ : str=2 , lowercase_ : List[Any]=0.02 , lowercase_ : Dict=1e-1_2 , lowercase_ : List[str]=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : int = 0 , **lowercase_ : int , ) -> int:
        """Store the transformer hyperparameters and projection settings."""
        super().__init__(pad_token_id=lowercase_ , **lowercase_)
        _UpperCamelCase = vocab_size
        _UpperCamelCase = hidden_size
        _UpperCamelCase = num_hidden_layers
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = hidden_act
        _UpperCamelCase = intermediate_size
        _UpperCamelCase = hidden_dropout_prob
        _UpperCamelCase = attention_probs_dropout_prob
        _UpperCamelCase = max_position_embeddings
        _UpperCamelCase = type_vocab_size
        _UpperCamelCase = initializer_range
        _UpperCamelCase = layer_norm_eps
        _UpperCamelCase = projection_dim
        _UpperCamelCase = position_embedding_type
| 82 | 1 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class _UpperCAmelCase ( lowerCAmelCase ):
    '''Processor wrapping an ``MCTCTFeatureExtractor`` (audio) and an
    ``AutoTokenizer`` (text), with a deprecated target-processor context
    manager for label encoding.

    NOTE(review): as elsewhere in this file, signatures reuse ``lowercase_``
    (duplicates are a SyntaxError) and bodies reference unbound names
    (``kwargs``, ``args``, ``audio``, ``text``, ``inputs`` ...).  Comments
    describe the apparent intent; confirm against the upstream transformers
    processor.
    '''
    __A = '''MCTCTFeatureExtractor'''
    __A = '''AutoTokenizer'''
    def __init__( self : Any , lowercase_ : Any , lowercase_ : List[str]) -> Dict:
        """Store both sub-processors; default to the feature extractor."""
        super().__init__(lowercase_ , lowercase_)
        _UpperCamelCase = self.feature_extractor
        _UpperCamelCase = False
    def __call__( self : Union[str, Any] , *lowercase_ : Union[str, Any] , **lowercase_ : List[str]) -> Any:
        """Dispatch audio to the feature extractor and/or text to the
        tokenizer; inside ``as_target_processor`` delegate everything to the
        current processor."""
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor(*lowercase_ , **lowercase_)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            _UpperCamelCase = kwargs.pop("raw_speech")
        else:
            _UpperCamelCase = kwargs.pop("audio" , lowercase_)
        _UpperCamelCase = kwargs.pop("sampling_rate" , lowercase_)
        _UpperCamelCase = kwargs.pop("text" , lowercase_)
        if len(lowercase_) > 0:
            _UpperCamelCase = args[0]
            _UpperCamelCase = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            _UpperCamelCase = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_)
        if text is not None:
            _UpperCamelCase = self.tokenizer(lowercase_ , **lowercase_)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # Merge the tokenized labels into the audio features.
            _UpperCamelCase = encodings["input_ids"]
            return inputs
    def __UpperCAmelCase ( self : Optional[int] , *lowercase_ : int , **lowercase_ : List[Any]) -> Optional[int]:
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*lowercase_ , **lowercase_)
    def __UpperCAmelCase ( self : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : List[Any]) -> List[str]:
        """Pad audio features and/or label ids, mirroring ``__call__``'s
        dispatch logic."""
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*lowercase_ , **lowercase_)
        _UpperCamelCase = kwargs.pop("input_features" , lowercase_)
        _UpperCamelCase = kwargs.pop("labels" , lowercase_)
        if len(lowercase_) > 0:
            _UpperCamelCase = args[0]
            _UpperCamelCase = args[1:]
        if input_features is not None:
            _UpperCamelCase = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_)
        if labels is not None:
            _UpperCamelCase = self.tokenizer.pad(lowercase_ , **lowercase_)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            # Merge the padded labels into the audio features.
            _UpperCamelCase = labels["input_ids"]
            return input_features
    def __UpperCAmelCase ( self : List[str] , *lowercase_ : str , **lowercase_ : Optional[Any]) -> Optional[int]:
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*lowercase_ , **lowercase_)
    @contextmanager
    def __UpperCAmelCase ( self : Dict) -> Tuple:
        """Deprecated: temporarily switch the current processor to the
        tokenizer so labels can be processed; restored on exit."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.")
        _UpperCamelCase = True
        _UpperCamelCase = self.tokenizer
        yield
        _UpperCamelCase = self.feature_extractor
        _UpperCamelCase = False
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Standard transformers lazy-import boilerplate for the Table Transformer
# model: declare the import structure, then defer real imports until access.
# NOTE(review): the structure dict is bound to the mangled name
# ``lowerCamelCase__`` while ``_LazyModule`` below is passed an unbound
# ``_import_structure`` — upstream both are ``_import_structure``; confirm.
lowerCamelCase__ = {
    '''configuration_table_transformer''': [
        '''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TableTransformerConfig''',
        '''TableTransformerOnnxConfig''',
    ]
}
# Only register the modeling module when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        '''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TableTransformerForObjectDetection''',
        '''TableTransformerModel''',
        '''TableTransformerPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy loader.
    import sys
    lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase__ = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase__ = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
lowerCamelCase__ = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class _UpperCAmelCase ( lowerCAmelCase ):
    """Fast (tokenizers-backed) DistilBERT tokenizer.

    NOTE(review): the base `lowerCAmelCase` is a mangled name — presumably
    `PreTrainedTokenizerFast`; the import is outside this chunk, confirm there.
    The original block had duplicate `lowercase_` parameter names (a
    SyntaxError) and bound every local to `_UpperCamelCase` while reading the
    real names; this restores consistent names so the class is importable.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if the serialized one disagrees with
        # the requested casing/accent/CJK options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # NOTE(review): `normalizers` (from the `tokenizers` package) must be
            # imported at module top — the import was mangled out of this chunk.
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` id sequences."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_a:
            output += token_ids_a_a + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first sequence (+specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of output labels per GLUE fine-tuning task ("sts-b" is a regression
# task, hence one output). The converter below reads this mapping as
# `GLUE_TASKS_NUM_LABELS`, so it must be bound under that name.
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
logging.set_verbosity_info()
def lowerCAmelCase__(tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None) -> None:
    """Convert a TensorFlow XLNet checkpoint into a PyTorch model directory.

    The original signature repeated `a__` for every parameter (a SyntaxError)
    and discarded every intermediate value; names are restored from the
    read-sites in the body.

    Args:
        tf_checkpoint_path: path to the TF checkpoint to load.
        xlnet_config_file: JSON config describing the XLNet architecture.
        pytorch_dump_folder_path: output folder for weights + config.
        finetuning_task: optional GLUE/SQuAD task name picking the model head.
    """
    config = XLNetConfig.from_json_file(xlnet_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}')
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'Save configuration file to {os.path.abspath(pytorch_config_dump_path)}')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # Previously the parser/args were bound to throwaway names and the call
    # targeted a function name that does not exist in this module.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--xlnet_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained XLNet model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
    )
    parser.add_argument(
        '''--finetuning_task''',
        default=None,
        type=str,
        help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
    )
    args = parser.parse_args()
    print(args)
    # The converter above is (mis)named `lowerCAmelCase__` in this module.
    lowerCAmelCase__(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 82 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCAmelCase ( unittest.TestCase ):
    """Smoke test launching the accelerate test script on 8 TPU cores via xla_spawn.

    The original stored the computed paths into a throwaway local, so the test
    method read `self.test_dir` / `self.test_file_path` that were never set.
    """

    def setUp(self) -> None:
        # Resolve paths relative to the installed accelerate.test_utils package.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self) -> None:
        distributed_args = f'\n    {self.test_dir}/xla_spawn.py\n    --num_cores 8\n    {self.test_file_path}\n    '.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCAmelCase ( pl.LightningModule ):
    """Minimal LightningModule shell used only to load the fine-tuned QA checkpoint.

    The converter below reads `lightning_model.model` and
    `lightning_model.qa_outputs`, so the constructor must actually store them
    (the original bound everything to a throwaway local).
    """

    def __init__(self, model) -> None:
        super().__init__()
        self.model = model  # base Longformer encoder
        self.num_labels = 2  # start/end logits for extractive QA
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        # Implemented in the original training repo; only the weights matter here.
        pass
def lowerCAmelCase__(longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path) -> None:
    """Convert a PyTorch-Lightning Longformer-QA checkpoint into a Transformers model dir.

    The original repeated `a__` for every parameter (a SyntaxError) and
    discarded every intermediate; names are restored from the read-sites.

    Args:
        longformer_model: model id, e.g. `longformer-base-4096`.
        longformer_question_answering_ckpt_path: Lightning .ckpt file to load.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}')
if __name__ == "__main__":
    # Previously the parser/args were bound to throwaway names and the call
    # targeted a function name that does not exist in this module.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--longformer_model''',
        default=None,
        type=str,
        required=True,
        help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
    )
    parser.add_argument(
        '''--longformer_question_answering_ckpt_path''',
        default=None,
        type=str,
        required=True,
        help='''Path the official PyTorch Lightning Checkpoint.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    # The converter above is (mis)named `lowerCAmelCase__` in this module.
    lowerCAmelCase__(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 82 | 1 |
import heapq as hq
import math
from collections.abc import Iterator
class _UpperCAmelCase :
    """Graph vertex for Prim's MST: id, current best key, parent, adjacency.

    The original constructor bound every attribute to a throwaway local, so
    the algorithms below (which read u.key / u.pi / u.neighbors / u.edges)
    crashed; attributes are restored from those read-sites.
    """

    def __init__(self, id_) -> None:
        self.id = str(id_)  # vertex label, kept as a string
        self.key = None  # best edge weight found so far (set by Prim)
        self.pi = None  # parent vertex in the MST
        self.neighbors = []  # adjacent Vertex objects
        self.edges = {}  # {vertex id: distance}

    def __lt__(self, other) -> bool:
        # Ordering by key lets min()/heapq pick the lightest-reachable vertex.
        return self.key < other.key

    def __repr__(self) -> str:
        return self.id

    def add_neighbor(self, vertex) -> None:
        """Record an adjacent vertex."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight) -> None:
        """Record the weight of the edge leading to `vertex`."""
        self.edges[vertex.id] = weight
def lowerCAmelCase__(graph, a, b, weight) -> None:
    """Add an undirected weighted edge between vertices numbered `a` and `b` (1-based).

    The original repeated `a__` for all four parameters (a SyntaxError) while
    the body read `graph`, `a` and `b`; names are restored accordingly.
    """
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], weight)
    graph[b - 1].add_edge(graph[a - 1], weight)
def lowerCAmelCase__(graph, root) -> list:
    """Prim's MST using min() over a plain list; returns [(child, parent), ...] 1-based.

    The original never assigned `u.key` / `u.pi` / `q` (everything was bound to
    a throwaway local), so it could not run; assignments are restored from the
    read-sites in the loop.
    """
    a = []  # resulting (child, parent) edge list
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0  # grow the tree from the root
    q = graph[:]
    while q:
        u = min(q)  # lightest vertex still outside the tree (uses Vertex.__lt__)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def lowerCAmelCase__(graph, root) -> Iterator[tuple]:
    """Heap-based Prim's MST; yields (child, parent) pairs 1-based.

    Same restoration as the list-based variant: keys/parents and the heap
    itself were previously assigned to a throwaway local.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                # key decreased in place -> restore the heap invariant
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
# NOTE(review): leftover placeholder — the body was reduced to a bare
# docstring, so calling this function is a no-op; presumably it once held
# doctest examples for the two Prim implementations above.
def lowerCAmelCase__ ( ) ->None:
    '''simple docstring'''
if __name__ == "__main__":
    import doctest
    # Run any doctests embedded in this module's docstrings.
    doctest.testmod()
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase ):
    """Deprecated alias kept for backward compatibility — use LayoutLMv2ImageProcessor.

    NOTE(review): the base `lowerCAmelCase` is a mangled name — presumably
    `LayoutLMvaImageProcessor` imported above; confirm at module top.
    """

    def __init__(self, *args, **kwargs) -> None:
        # The original declared `*lowercase_, **lowercase_` (duplicate argument
        # name -> SyntaxError) and passed the args tuple as the warning
        # category; a FutureWarning is the conventional category here.
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
# ---
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class _UpperCAmelCase ( TaskTemplate ):
    """Task template mapping a dataset's columns onto image-classification I/O.

    Restored from mangled form: every field was named `__A` (so only the last
    survived), `frozen=` pointed at an unbound name, and the align method
    stored its results into throwaway locals.
    """

    # `task` is serialized even when left at its default value.
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's own ClassLabel."""
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so bypass __setattr__ via __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the configured dataset column names onto the canonical task names."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( enum.Enum ):
    """Output-format selector, referenced elsewhere in this module as `ReturnType`.

    Both members were previously named `__A`, which Enum rejects; the member
    names are restored from the `ReturnType.TENSORS` / `ReturnType.TEXT`
    read-sites in the pipeline below.
    """

    TENSORS = 0  # return raw generated token ids
    TEXT = 1  # return decoded strings
@add_end_docstrings(PIPELINE_INIT_ARGS)
class _UpperCAmelCase ( Pipeline ):
    """Pipeline for text-to-text generation with seq2seq models.

    Restored from mangled form: duplicate `lowercase_` parameters
    (SyntaxErrors), locals bound to a throwaway name, and colliding method
    names; real names come from the read-sites and the `Pipeline` hook
    contract (preprocess/_forward/postprocess).
    NOTE(review): `ReturnType` below refers to the enum class above, whose own
    name was mangled to `_UpperCAmelCase`; the module needs a consistent
    rename for the reference to resolve.
    """

    # Key prefix of the returned records: "generated_text" / "generated_token_ids".
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Restrict usage to seq2seq (encoder-decoder) model classes.
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        """Split user kwargs into preprocess / forward / postprocess parameter dicts."""
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Length sanity hook; subclasses override with task-specific warnings."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        """Prefix the input(s) with the model's prompt prefix and tokenize them."""
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`')
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # Flatten the output when a batch of plain strings each produced one record.
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()
        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        # Regroup generated sequences per input sample: (in_b, out_b // in_b, ...).
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f'{self.return_name}_token_ids': output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f'{self.return_name}_text': self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class _UpperCAmelCase ( lowerCAmelCase ):
    """Summarization pipeline: same machinery as text2text, plus length warnings.

    NOTE(review): the base `lowerCAmelCase` is a mangled name — presumably the
    Text2TextGenerationPipeline class defined above.
    """

    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """Warn about length configurations that usually indicate a user mistake.

        The original declared three parameters all named `lowercase_`
        (a SyntaxError); names are restored from the body's reads.
        """
        if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be inferior than your max_length={max_length}.')
        if input_length < max_length:
            logger.warning(
                f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})')
@add_end_docstrings(PIPELINE_INIT_ARGS)
class _UpperCAmelCase ( lowerCAmelCase ):
    """Translation pipeline with src/tgt language routing.

    NOTE(review): the base `lowerCAmelCase` is a mangled name — presumably the
    Text2TextGenerationPipeline class defined above. Duplicate parameter
    names (SyntaxErrors) and throwaway-local assignments are restored from
    the body's read-sites.
    """

    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Warn when the input is close to / beyond the requested max_length."""
        if input_length > 0.9 * max_length:
            logger.warning(
                f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
                "increasing your max_length manually, e.g. translator('...', max_length=400)")
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        # Prefer the tokenizer's dedicated translation-input builder when available.
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
| 82 | 1 |
import functools
def lowerCAmelCase__(days, costs) -> int:
    """Minimum cost to travel on every day in `days` with 1/7/30-day passes.

    The original repeated `a__` for both parameters (a SyntaxError) and never
    bound `days_set`; names are restored from the body's read-sites.

    Args:
        days: sorted-or-not list of travel days, each in [1, 365].
        costs: [1-day, 7-day, 30-day] pass prices.
    Returns:
        The minimal total price covering all travel days.
    Raises:
        ValueError: on malformed `days`/`costs` or out-of-range days.
    """
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index) -> int:
        # Cheapest cost to cover travel days from `index` to the end of the year.
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''spiece.model'''}
lowerCamelCase__ = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase__ = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
lowerCamelCase__ = '''▁'''
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , lowercase_ : int , lowercase_ : str="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Dict="<pad>" , lowercase_ : Tuple=100 , lowercase_ : str=None , lowercase_ : Optional[Dict[str, Any]] = None , lowercase_ : str=True , **lowercase_ : Optional[Any] , ) -> None:
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
_UpperCamelCase = [f'<extra_id_{i}>' for i in range(lowercase_)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCamelCase = len(set(filter(lambda lowercase_: bool("extra_id" in str(lowercase_)) , lowercase_)))
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens")
if legacy:
logger.warning_once(
f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
_UpperCamelCase = legacy
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase_ , **lowercase_ , )
_UpperCamelCase = vocab_file
_UpperCamelCase = extra_ids
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowercase_)
@staticmethod
def __UpperCAmelCase ( lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : str) -> Any:
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCamelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f' {pretrained_model_name_or_path} automatically truncating your input to'
f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , lowercase_ , )
return max_model_length
@property
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
return self.sp_model.get_piece_size() + self._extra_ids
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __UpperCAmelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowercase_)) + [1]
return ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1]
def __UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return list(
set(filter(lambda lowercase_: bool(re.search(R"<extra_id_\d+>" , lowercase_)) is not None , self.additional_special_tokens)))
def __UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
return [self._convert_token_to_id(lowercase_) for token in self.get_sentinel_tokens()]
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[int]) -> List[int]:
"""simple docstring"""
if len(lowercase_) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
" eos tokens being added.")
return token_ids
else:
return token_ids + [self.eos_token_id]
def __UpperCAmelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCamelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCamelCase = self._add_eos_if_not_present(lowercase_)
if token_ids_a is None:
return token_ids_a
else:
_UpperCamelCase = self._add_eos_if_not_present(lowercase_)
return token_ids_a + token_ids_a
def __getstate__( self : Tuple) -> Any:
"""simple docstring"""
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
return state
def __setstate__( self : Optional[Any] , lowercase_ : Any) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __UpperCAmelCase ( self : int , lowercase_ : "TextInput" , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
if not self.legacy:
_UpperCamelCase = SPIECE_UNDERLINE + text.replace(lowercase_ , " ")
return super().tokenize(lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : int , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
if not self.legacy:
_UpperCamelCase = text.startswith(lowercase_)
if is_first:
_UpperCamelCase = text[1:]
_UpperCamelCase = self.sp_model.encode(lowercase_ , out_type=lowercase_)
if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(lowercase_):
_UpperCamelCase = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
return tokens
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
if token.startswith("<extra_id_"):
_UpperCamelCase = re.match(R"<extra_id_(\d+)>" , lowercase_)
_UpperCamelCase = int(match.group(1))
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(lowercase_)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> int:
"""simple docstring"""
if index < self.sp_model.get_piece_size():
_UpperCamelCase = self.sp_model.IdToPiece(lowercase_)
else:
_UpperCamelCase = f'<extra_id_{self.vocab_size - 1 - index}>'
return token
def __UpperCAmelCase ( self : Dict , lowercase_ : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = ""
_UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase_) + token
_UpperCamelCase = True
_UpperCamelCase = []
else:
current_sub_tokens.append(lowercase_)
_UpperCamelCase = False
out_string += self.sp_model.decode(lowercase_)
return out_string.strip()
def __UpperCAmelCase ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase_):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
_UpperCamelCase = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowercase_)
elif not os.path.isfile(self.vocab_file):
with open(lowercase_ , "wb") as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase_)
return (out_vocab_file,)
| 82 | 1 |
def lowerCAmelCase__(lst) -> list:
    """Sort `lst` in place using gnome sort and return it.

    The original bound the index and the swap results to a throwaway local
    (so `i` was undefined and the swap had no effect); restored here.
    """
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1  # pair in order: step forward
        else:
            # Out of order: swap and step back like the gnome re-checking pots.
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    # Previously both locals were bound to one throwaway name and the call
    # targeted `gnome_sort`, which does not exist in this module.
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(lowerCAmelCase__(unsorted))
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    """Build a (getitem, key) operation tuple for the table-driven tests.

    Renamed from the mangled shared name: the fixtures below call `_get`.
    """
    return getitem, k
def _set(k, v):
    """Build a (setitem, key, value) operation tuple for the table-driven tests.

    The original declared two parameters both named `a__` (a SyntaxError);
    renamed to match the `_set(...)` call sites below.
    """
    return setitem, k, v
def _del(k):
    """Build a (delitem, key) operation tuple for the table-driven tests.

    Renamed from the mangled shared name: the fixtures below call `_del`.
    """
    return delitem, k
def _run_operation(obj, fun, *args):
    """Apply `fun(obj, *args)`; return (result, None) on success or (None, exception).

    The original declared three parameters all named `a__` (a SyntaxError);
    renamed to match the `_run_operation(...)` call sites below.
    """
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
# Operation tables replayed against both HashMap and dict below. All six were
# previously bound to one throwaway name while the pytest.param(...) entries
# read `_add_items`, `_overwrite_items`, etc.; the names are restored from
# those read-sites.
_add_items = (
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
)
_overwrite_items = [
    _set('''key_a''', '''val_a'''),
    _set('''key_a''', '''val_b'''),
]
_delete_items = [
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
    _del('''key_a'''),
    _del('''key_b'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
]
_access_absent_items = [
    _get('''key_a'''),
    _del('''key_a'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
    _del('''key_a'''),
    _get('''key_a'''),
]
_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
    "operations" , (
        pytest.param(_add_items , id="add items" ),
        pytest.param(_overwrite_items , id="overwrite items" ),
        pytest.param(_delete_items , id="delete items" ),
        pytest.param(_access_absent_items , id="access absent items" ),
        pytest.param(_add_with_resize_up , id="add with resize up" ),
        pytest.param(_add_with_resize_down , id="add with resize down" ),
    ) , )
def lowerCAmelCase__ ( a__ ) ->Dict:
    '''Replay one operation script against both a dict and a HashMap and compare.'''
    # NOTE(review): the parametrized scenario names and `_run_operation` are not
    # bound under those names in this chunk, and the two calls below pass
    # identical arguments while the asserts read my_res/py_res/my/py — this
    # body looks garbled; confirm against the upstream test before relying on it.
    _UpperCamelCase = HashMap(initial_block_size=4 )
    _UpperCamelCase = {}
    for _, (fun, *args) in enumerate(a__ ):
        # Run the same operation on both containers and compare every
        # observable: result/exception, repr, key set, length, and items.
        _UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
        _UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
        assert my_res == py_res
        assert str(a__ ) == str(a__ )
        assert set(a__ ) == set(a__ )
        assert len(a__ ) == len(a__ )
        assert set(my.items() ) == set(py.items() )
def lowerCAmelCase__ ( ) ->List[Any]:
    """Check that HashMap exposes no public names beyond those of ``dict``.

    Collects the public attribute names of a built-in dict and of a
    ``HashMap`` instance and asserts the dict's set is a strict superset,
    i.e. HashMap introduces no extra public API.
    """
    def is_public(name: str) -> bool:
        # Names without a leading underscore are considered public.
        return not name.startswith("_" )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    assert dict_public_names > hash_public_names
| 82 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    # sentencepiece is unavailable: the slow tokenizer class cannot be used.
    lowerCamelCase__ = None
# Module-level logger.
lowerCamelCase__ = logging.get_logger(__name__)
# Local filenames used when saving the tokenizer.
lowerCamelCase__ = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
# Remote vocab/tokenizer file URLs for the canonical T5 checkpoints.
lowerCamelCase__ = {
    '''vocab_file''': {
        '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
        '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
        '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
        '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
        '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
    },
    '''tokenizer_file''': {
        '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
        '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
        '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
        '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
        '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
    },
}
# Historical default max lengths per checkpoint.
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase__ = {
    '''t5-small''': 512,
    '''t5-base''': 512,
    '''t5-large''': 512,
    '''t5-3b''': 512,
    '''t5-11b''': 512,
}
class _UpperCAmelCase ( lowerCAmelCase ):
    '''Fast (tokenizers-backed) T5 tokenizer with ``<extra_id_N>`` sentinel support.'''
    __A = VOCAB_FILES_NAMES
    __A = PRETRAINED_VOCAB_FILES_MAP
    __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __A = ['''input_ids''', '''attention_mask''']
    __A = TaTokenizer
    __A = []
    def __init__( self : Optional[int] , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=None , lowercase_ : List[str]="</s>" , lowercase_ : int="<unk>" , lowercase_ : Any="<pad>" , lowercase_ : List[str]=100 , lowercase_ : Optional[int]=None , **lowercase_ : List[str] , ) -> List[Any]:
        """Create the fast tokenizer, deriving or validating the <extra_id_*> tokens."""
        if extra_ids > 0 and additional_special_tokens is None:
            # No explicit special tokens given: synthesise one <extra_id_i> per id.
            _UpperCamelCase = [f'<extra_id_{i}>' for i in range(lowercase_)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            _UpperCamelCase = len(set(filter(lambda lowercase_: bool("extra_id_" in str(lowercase_)) , lowercase_)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")
        super().__init__(
            lowercase_ , tokenizer_file=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
        _UpperCamelCase = vocab_file
        # Saving a slow vocabulary is only possible when a sentencepiece file exists.
        _UpperCamelCase = False if not self.vocab_file else True
        _UpperCamelCase = extra_ids
    @staticmethod
    def __UpperCAmelCase ( lowercase_ : str , lowercase_ : str , lowercase_ : Union[str, Any]) -> Dict:
        """Resolve the effective max model length for legacy T5 checkpoints (to be removed in Transformers v5)."""
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            _UpperCamelCase = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                # Warn once that the historical 512 default will change in v5.
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f' {pretrained_model_name_or_path} automatically truncating your input to'
                    f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
                    f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value." , lowercase_ , )
        return max_model_length
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into the save directory and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(lowercase_):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        _UpperCamelCase = os.path.join(
            lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_):
            # Only copy when source and destination differ.
            copyfile(self.vocab_file , lowercase_)
            logger.info(f'Copy vocab file to {out_vocab_file}')
        return (out_vocab_file,)
    def __UpperCAmelCase ( self : int , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Append EOS to each sequence and concatenate for single or pair inputs."""
        _UpperCamelCase = token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a
        else:
            _UpperCamelCase = token_ids_a + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_a
    def __UpperCAmelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Return all-zero token type ids; T5 does not use token types."""
        _UpperCamelCase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a + eos) * [0]
    def __UpperCAmelCase ( self : Optional[int]) -> int:
        """Return the <extra_id_N> sentinel tokens among the additional special tokens."""
        return list(
            set(filter(lambda lowercase_: bool(re.search(R"<extra_id_\d+>" , lowercase_)) is not None , self.additional_special_tokens)))
    def __UpperCAmelCase ( self : str) -> Optional[int]:
        """Return the vocabulary ids of the sentinel tokens."""
        return [self.convert_tokens_to_ids(lowercase_) for token in self.get_sentinel_tokens()]
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make this diffusers test module fully deterministic.
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
    '''Fast CPU tests for the Kandinsky 2.2 controlnet img2img pipeline.'''
    __A = KandinskyVaaControlnetImgaImgPipeline
    __A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    __A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    __A = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    __A = False
    @property
    def __UpperCAmelCase ( self : List[Any]) -> Tuple:
        """Return 32 (dummy embedding width used by the test components)."""
        return 32
    @property
    def __UpperCAmelCase ( self : Tuple) -> Tuple:
        """Return 32 (dummy time-input dimension)."""
        return 32
    @property
    def __UpperCAmelCase ( self : Optional[int]) -> str:
        """Mirror the time input dimension."""
        return self.time_input_dim
    @property
    def __UpperCAmelCase ( self : List[str]) -> Any:
        """Return four times the time input dimension."""
        return self.time_input_dim * 4
    @property
    def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
        """Return 100 (dummy sequence length)."""
        return 100
    @property
    def __UpperCAmelCase ( self : Dict) -> List[Any]:
        """Build a small, seeded UNet2DConditionModel for the tests."""
        torch.manual_seed(0)
        _UpperCamelCase = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        _UpperCamelCase = UNetaDConditionModel(**lowercase_)
        return model
    @property
    def __UpperCAmelCase ( self : int) -> Optional[int]:
        """Keyword arguments for the dummy VQModel (movq) used in the tests."""
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }
    @property
    def __UpperCAmelCase ( self : int) -> Dict:
        """Build a small, seeded VQModel for the tests."""
        torch.manual_seed(0)
        _UpperCamelCase = VQModel(**self.dummy_movq_kwargs)
        return model
    def __UpperCAmelCase ( self : int) -> Any:
        """Assemble the pipeline components: dummy unet, DDIM scheduler, movq."""
        _UpperCamelCase = self.dummy_unet
        _UpperCamelCase = self.dummy_movq
        _UpperCamelCase = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.0_00_85,
            "beta_end": 0.0_12,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        _UpperCamelCase = DDIMScheduler(**lowercase_)
        _UpperCamelCase = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def __UpperCAmelCase ( self : str , lowercase_ : Dict , lowercase_ : List[str]=0) -> List[str]:
        """Create deterministic dummy inputs: embeds, init image, hint, generator."""
        # NOTE(review): this signature declares `lowercase_` twice, which is a
        # SyntaxError in Python — confirm the intended parameter names upstream.
        _UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase_)).to(lowercase_)
        _UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
            lowercase_)
        # create init_image
        _UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
        _UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
        _UpperCamelCase = Image.fromarray(np.uinta(lowercase_)).convert("RGB").resize((256, 256))
        # create hint
        _UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
        if str(lowercase_).startswith("mps"):
            # torch.Generator does not support the mps device; seed globally.
            _UpperCamelCase = torch.manual_seed(lowercase_)
        else:
            _UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(lowercase_)
        _UpperCamelCase = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def __UpperCAmelCase ( self : Any) -> str:
        """Run the pipeline on CPU and compare an output slice to reference values."""
        _UpperCamelCase = "cpu"
        _UpperCamelCase = self.get_dummy_components()
        _UpperCamelCase = self.pipeline_class(**lowercase_)
        _UpperCamelCase = pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = pipe(**self.get_dummy_inputs(lowercase_))
        _UpperCamelCase = output.images
        # Same call with return_dict disabled must yield identical images.
        _UpperCamelCase = pipe(
            **self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
        _UpperCamelCase = image[0, -3:, -3:, -1]
        _UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _UpperCamelCase = np.array(
            [0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    '''Slow GPU integration tests for the Kandinsky 2.2 controlnet img2img pipeline.'''
    def __UpperCAmelCase ( self : Union[str, Any]) -> int:
        """Release memory after each test: run tearDown, collect garbage, empty the CUDA cache."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCAmelCase ( self : Optional[int]) -> Any:
        """End-to-end depth-controlnet img2img run, compared to a stored reference image."""
        _UpperCamelCase = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
        _UpperCamelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        _UpperCamelCase = init_image.resize((512, 512))
        _UpperCamelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        # Scale the hint image to [0, 1] and reshape to (1, C, H, W).
        _UpperCamelCase = torch.from_numpy(np.array(lowercase_)).float() / 2_55.0
        _UpperCamelCase = hint.permute(2 , 0 , 1).unsqueeze(0)
        _UpperCamelCase = "A robot, 4k photo"
        _UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
        pipe_prior.to(lowercase_)
        _UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
        _UpperCamelCase = pipeline.to(lowercase_)
        pipeline.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = torch.Generator(device="cpu").manual_seed(0)
        _UpperCamelCase , _UpperCamelCase = pipe_prior(
            lowercase_ , image=lowercase_ , strength=0.85 , generator=lowercase_ , negative_prompt="" , ).to_tuple()
        _UpperCamelCase = pipeline(
            image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , hint=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
        _UpperCamelCase = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(lowercase_ , lowercase_)
| 82 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
# Module-level logger.
lowerCamelCase__ = logging.get_logger(__name__)
# Local filenames used when saving the tokenizer.
lowerCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
# Remote URLs of the pretrained MVP tokenizer files.
lowerCamelCase__ = {
    '''vocab_file''': {
        '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
    },
    '''added_tokens.json''': {
        '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
    },
    '''merges_file''': {
        '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
    },
}
# Maximum positional embedding size per checkpoint.
lowerCamelCase__ = {
    '''RUCAIBox/mvp''': 1024,
}
class _UpperCAmelCase ( lowerCAmelCase ):
    '''Fast (tokenizers-backed) tokenizer for MVP models.'''
    __A = VOCAB_FILES_NAMES
    __A = PRETRAINED_VOCAB_FILES_MAP
    __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __A = ['''input_ids''', '''attention_mask''']
    __A = MvpTokenizer
    def __init__( self : Union[str, Any] , lowercase_ : Optional[int]=None , lowercase_ : List[str]=None , lowercase_ : str=None , lowercase_ : Union[str, Any]="replace" , lowercase_ : List[Any]="<s>" , lowercase_ : Optional[int]="</s>" , lowercase_ : Tuple="</s>" , lowercase_ : Tuple="<s>" , lowercase_ : List[Any]="<unk>" , lowercase_ : Union[str, Any]="<pad>" , lowercase_ : Optional[int]="<mask>" , lowercase_ : List[str]=False , lowercase_ : List[str]=True , **lowercase_ : str , ) -> str:
        """Initialise the backend tokenizer and sync add_prefix_space/trim_offsets state."""
        super().__init__(
            lowercase_ , lowercase_ , tokenizer_file=lowercase_ , errors=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ , **lowercase_ , )
        _UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space" , lowercase_) != add_prefix_space:
            # Rebuild the pre-tokenizer so its add_prefix_space matches the request.
            _UpperCamelCase = getattr(lowercase_ , pre_tok_state.pop("type"))
            _UpperCamelCase = add_prefix_space
            _UpperCamelCase = pre_tok_class(**lowercase_)
        _UpperCamelCase = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        _UpperCamelCase = "post_processor"
        _UpperCamelCase = getattr(self.backend_tokenizer , lowercase_ , lowercase_)
        if tokenizer_component_instance:
            _UpperCamelCase = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                _UpperCamelCase = tuple(state["sep"])
            if "cls" in state:
                _UpperCamelCase = tuple(state["cls"])
            _UpperCamelCase = False
            if state.get("add_prefix_space" , lowercase_) != add_prefix_space:
                _UpperCamelCase = add_prefix_space
                _UpperCamelCase = True
            if state.get("trim_offsets" , lowercase_) != trim_offsets:
                _UpperCamelCase = trim_offsets
                _UpperCamelCase = True
            if changes_to_apply:
                # Re-instantiate the post-processor with the corrected state.
                _UpperCamelCase = getattr(lowercase_ , state.pop("type"))
                _UpperCamelCase = component_class(**lowercase_)
                setattr(self.backend_tokenizer , lowercase_ , lowercase_)
    @property
    def __UpperCAmelCase ( self : List[str]) -> str:
        """Return the mask token string, or None (with an error log) when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)
    @mask_token.setter
    def __UpperCAmelCase ( self : Any , lowercase_ : Tuple) -> Any:
        """Set the mask token, wrapping plain strings in an AddedToken."""
        # NOTE(review): `@mask_token.setter` refers to a property named
        # `mask_token`, but the getter above is not bound to that name —
        # confirm against the upstream transformers source.
        _UpperCamelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_) if isinstance(lowercase_ , lowercase_) else value
        _UpperCamelCase = value
    def __UpperCAmelCase ( self : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : Union[str, Any]) -> BatchEncoding:
        """Batch-encode; pretokenized input requires add_prefix_space=True."""
        _UpperCamelCase = kwargs.get("is_split_into_words" , lowercase_)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*lowercase_ , **lowercase_)
    def __UpperCAmelCase ( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : Dict) -> BatchEncoding:
        """Encode a single example; pretokenized input requires add_prefix_space=True."""
        _UpperCamelCase = kwargs.get("is_split_into_words" , lowercase_)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*lowercase_ , **lowercase_)
    def __UpperCAmelCase ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files and return their paths."""
        _UpperCamelCase = self._tokenizer.model.save(lowercase_ , name=lowercase_)
        return tuple(lowercase_)
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : Union[str, Any]=None) -> Dict:
        """Wrap sequences with BOS/EOS special tokens (pairs get an extra EOS separator)."""
        _UpperCamelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
    def __UpperCAmelCase ( self : str , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Return all-zero token type ids; MVP does not use token types."""
        _UpperCamelCase = [self.sep_token_id]
        _UpperCamelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def lowerCAmelCase__ ( a__ ) ->int:
    """Count the distinct ways to climb a staircase of *a__* steps.

    Each move climbs one or two steps, so the count follows the Fibonacci
    sequence: f(1) = 1, f(2) = 2, f(n) = f(n-1) + f(n-2).

    :param a__: number of steps; must be a positive integer
    :return: number of distinct climbing sequences

    >>> lowerCAmelCase__(3)
    3
    """
    assert (
        isinstance(a__ , int ) and a__ > 0
    ), f'number_of_steps needs to be positive integer, your input {a__}'
    if a__ == 1:
        return 1
    previous, current = 1, 1
    for _ in range(a__ - 1 ):
        # Advance one Fibonacci step.
        current, previous = current + previous, current
    return current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
    '''CPU regression tests for the ONNX Stable Diffusion img2img pipeline, one per scheduler.'''
    __A = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
    def __UpperCAmelCase ( self : Tuple , lowercase_ : Dict=0) -> Tuple:
        """Build deterministic dummy inputs: prompt, image, RNG, and sampling knobs."""
        _UpperCamelCase = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowercase_))
        _UpperCamelCase = np.random.RandomState(lowercase_)
        _UpperCamelCase = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def __UpperCAmelCase ( self : Optional[int]) -> Tuple:
        """Default scheduler: output slice must match the reference values."""
        _UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = self.get_dummy_inputs()
        _UpperCamelCase = pipe(**lowercase_).images
        _UpperCamelCase = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        _UpperCamelCase = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def __UpperCAmelCase ( self : Dict) -> Any:
        """PNDM scheduler (skip_prk_steps) variant."""
        _UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
        _UpperCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = self.get_dummy_inputs()
        _UpperCamelCase = pipe(**lowercase_).images
        _UpperCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        _UpperCamelCase = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
        """LMS discrete scheduler variant (includes a warmup pass)."""
        _UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
        _UpperCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=lowercase_)
        # warmup pass to apply optimizations
        _UpperCamelCase = pipe(**self.get_dummy_inputs())
        _UpperCamelCase = self.get_dummy_inputs()
        _UpperCamelCase = pipe(**lowercase_).images
        _UpperCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        _UpperCamelCase = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def __UpperCAmelCase ( self : Any) -> Optional[int]:
        """Euler discrete scheduler variant."""
        _UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
        _UpperCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = self.get_dummy_inputs()
        _UpperCamelCase = pipe(**lowercase_).images
        _UpperCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        _UpperCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def __UpperCAmelCase ( self : Union[str, Any]) -> Tuple:
        """Euler ancestral discrete scheduler variant."""
        _UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
        _UpperCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = self.get_dummy_inputs()
        _UpperCamelCase = pipe(**lowercase_).images
        _UpperCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        _UpperCamelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def __UpperCAmelCase ( self : Union[str, Any]) -> str:
        """DPM-Solver multistep scheduler variant."""
        _UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
        _UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = self.get_dummy_inputs()
        _UpperCamelCase = pipe(**lowercase_).images
        _UpperCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        _UpperCamelCase = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    '''Nightly GPU integration tests for the ONNX Stable Diffusion img2img pipeline.'''
    @property
    def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]:
        """ONNX Runtime CUDA execution-provider configuration used by these tests."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def __UpperCAmelCase ( self : Tuple) -> int:
        """Build the ONNX Runtime session options used by these tests."""
        _UpperCamelCase = ort.SessionOptions()
        _UpperCamelCase = False
        return options
    def __UpperCAmelCase ( self : List[Any]) -> List[str]:
        """img2img with the default scheduler, compared against a reference slice."""
        _UpperCamelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        _UpperCamelCase = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        _UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=lowercase_ , feature_extractor=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = "A fantasy landscape, trending on artstation"
        _UpperCamelCase = np.random.RandomState(0)
        _UpperCamelCase = pipe(
            prompt=lowercase_ , image=lowercase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type="np" , )
        _UpperCamelCase = output.images
        _UpperCamelCase = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        _UpperCamelCase = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def __UpperCAmelCase ( self : int) -> str:
        """img2img with an explicit LMS discrete scheduler, compared against a reference slice."""
        _UpperCamelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        _UpperCamelCase = init_image.resize((768, 512))
        _UpperCamelCase = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx")
        _UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=lowercase_ , safety_checker=lowercase_ , feature_extractor=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = "A fantasy landscape, trending on artstation"
        _UpperCamelCase = np.random.RandomState(0)
        _UpperCamelCase = pipe(
            prompt=lowercase_ , image=lowercase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase_ , output_type="np" , )
        _UpperCamelCase = output.images
        _UpperCamelCase = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        _UpperCamelCase = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
# Module-level logger.
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    '''Zero-shot object detection pipeline: localise objects described by free-text candidate labels.'''
    def __init__( self : Union[str, Any] , **lowercase_ : Tuple) -> Any:
        """Validate the framework (PyTorch only), the vision backend, and the model type."""
        super().__init__(**lowercase_)
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self , "vision")
        self.check_model_type(lowercase_)
    def __call__( self : str , lowercase_ : Union[str, "Image.Image", List[Dict[str, Any]]] , lowercase_ : Union[str, List[str]] = None , **lowercase_ : str , ) -> List[str]:
        """Detect objects matching the candidate labels in the given image(s)."""
        if "text_queries" in kwargs:
            # Legacy argument name; remap to candidate_labels.
            _UpperCamelCase = kwargs.pop("text_queries")
        if isinstance(lowercase_ , (str, Image.Image)):
            # Single image: wrap image + labels into one input dict.
            _UpperCamelCase = {"image": image, "candidate_labels": candidate_labels}
        else:
            _UpperCamelCase = image
        _UpperCamelCase = super().__call__(lowercase_ , **lowercase_)
        return results
    def __UpperCAmelCase ( self : Any , **lowercase_ : int) -> List[str]:
        """Split kwargs into preprocess/forward/postprocess parameter dicts."""
        _UpperCamelCase = {}
        if "threshold" in kwargs:
            _UpperCamelCase = kwargs["threshold"]
        if "top_k" in kwargs:
            _UpperCamelCase = kwargs["top_k"]
        return {}, {}, postprocess_params
    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> List[str]:
        """Yield one model input per candidate label for a single image."""
        _UpperCamelCase = load_image(inputs["image"])
        _UpperCamelCase = inputs["candidate_labels"]
        if isinstance(lowercase_ , lowercase_):
            # Also accept a comma-separated string of labels.
            _UpperCamelCase = candidate_labels.split(",")
        _UpperCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
        for i, candidate_label in enumerate(lowercase_):
            _UpperCamelCase = self.tokenizer(lowercase_ , return_tensors=self.framework)
            _UpperCamelCase = self.image_processor(lowercase_ , return_tensors=self.framework)
            yield {
                "is_last": i == len(lowercase_) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def __UpperCAmelCase ( self : Dict , lowercase_ : Tuple) -> str:
        """Run the model, carrying the bookkeeping fields through to postprocessing."""
        _UpperCamelCase = model_inputs.pop("target_size")
        _UpperCamelCase = model_inputs.pop("candidate_label")
        _UpperCamelCase = model_inputs.pop("is_last")
        _UpperCamelCase = self.model(**lowercase_)
        _UpperCamelCase = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def __UpperCAmelCase ( self : int , lowercase_ : Tuple , lowercase_ : List[str]=0.1 , lowercase_ : int=None) -> List[str]:
        """Convert model outputs into {score, label, box} dicts, sorted by score."""
        _UpperCamelCase = []
        for model_output in model_outputs:
            _UpperCamelCase = model_output["candidate_label"]
            _UpperCamelCase = BaseModelOutput(lowercase_)
            _UpperCamelCase = self.image_processor.post_process_object_detection(
                outputs=lowercase_ , threshold=lowercase_ , target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                _UpperCamelCase = outputs["scores"][index].item()
                _UpperCamelCase = self._get_bounding_box(outputs["boxes"][index][0])
                _UpperCamelCase = {"score": score, "label": label, "box": box}
                results.append(lowercase_)
        # NOTE(review): the sort-key lambda's parameter and the name it uses
        # (`x`) do not match — confirm against the upstream implementation.
        _UpperCamelCase = sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)
        if top_k:
            _UpperCamelCase = results[:top_k]
        return results
    def __UpperCAmelCase ( self : str , lowercase_ : "torch.Tensor") -> Dict[str, int]:
        """Convert an (xmin, ymin, xmax, ymax) box tensor into a dict of ints."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = box.int().tolist()
        _UpperCamelCase = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 82 | 1 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = '''pytorch_model.bin'''
@dataclasses.dataclass
class _UpperCAmelCase :
    '''Arguments selecting the pretrained model used for self-training.'''

    # NOTE(review): the obfuscation collapsed the original field names and
    # type annotations into a single mangled `__A`.  Without annotations these
    # are plain class attributes (not dataclass fields) and the second
    # assignment shadows the first; `default=lowerCAmelCase` is likewise a
    # mangled default (presumably None).  Restore against the upstream source.
    # Presumably `model_name_or_path` (required):
    __A = dataclasses.field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} )
    # Presumably `cache_dir` (optional):
    __A = dataclasses.field(
        default=lowerCAmelCase, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''}, )
@dataclasses.dataclass
class _UpperCAmelCase :
    '''Arguments naming the train / inference / validation data files and task.'''

    # NOTE(review): field names and type annotations were mangled to `__A`, so
    # only the last assignment survives and none are real dataclass fields;
    # `default=lowerCAmelCase` is a mangled default (presumably None).
    # Restore distinct annotated names (train_file, infer_file, eval_file,
    # task_name, label_list) from the upstream source before use.
    __A = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} )
    __A = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} )
    __A = dataclasses.field(
        default=lowerCAmelCase, metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
    __A = dataclasses.field(
        default=lowerCAmelCase, metadata={'''help''': '''The name of the task to train on.'''}, )
    __A = dataclasses.field(
        default=lowerCAmelCase, metadata={'''help''': '''The list of labels for the task.'''} )
@dataclasses.dataclass
class _UpperCAmelCase :
    '''Training / early-stopping / pseudo-label-filtering arguments for self-training.'''

    # NOTE(review): field names and type annotations were mangled to `__A`
    # (only the last assignment survives; none are real dataclass fields) and
    # boolean defaults were mangled to `lowerCAmelCase` (presumably False).
    # Restore names such as output_dir, eval_metric, evaluation_strategy,
    # early_stopping_patience, early_stopping_threshold,
    # do_filter_by_confidence, do_filter_by_val_performance,
    # finetune_on_labeled_data, confidence_threshold,
    # max_selftrain_iterations, seed from the upstream source before use.
    __A = dataclasses.field(
        metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} )
    __A = dataclasses.field(
        default='''accuracy''', metadata={'''help''': '''The evaluation metric used for the task.'''} )
    __A = dataclasses.field(
        default='''no''', metadata={
            '''help''': '''The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'''
        }, )
    __A = dataclasses.field(
        default=10, metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''}, )
    __A = dataclasses.field(
        default=0.0, metadata={
            '''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.'''
        }, )
    __A = dataclasses.field(
        default=lowerCAmelCase, metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''}, )
    __A = dataclasses.field(
        default=lowerCAmelCase, metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''}, )
    __A = dataclasses.field(
        default=lowerCAmelCase, metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''}, )
    __A = dataclasses.field(
        default=0.0, metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''}, )
    __A = dataclasses.field(
        default=100, metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''}, )
    __A = dataclasses.field(
        default=lowerCAmelCase, metadata={'''help''': '''Random seed for initialization.'''}, )
def lowerCAmelCase__ ( args , infer_input , infer_output , eval_result , idalabel , output_dir ) ->None:
    '''Write the pseudo-labeled training file for the next self-training iteration.

    Joins the inference inputs with the model predictions, optionally filters
    rows by prediction confidence and/or keeps only the top ``eval_result``
    fraction, maps predicted ids back to label names, shuffles, and saves
    ``train_pseudo.<ext>`` under ``output_dir``.

    NOTE(review): the obfuscated original named all six parameters ``a__``
    (a SyntaxError), so the argument order here was reconstructed from the
    names the body reads — confirm it against the upstream source and the
    call site.
    '''
    dataset = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold )
    if args.do_filter_by_val_performance:
        # eval_result is interpreted as the fraction of rows to keep.
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset ) )
        print(num_selected_rows )
        # Keep the most confident predictions.
        dataset = dataset.sort("probability" , reverse=True )
        dataset = dataset.select(range(num_selected_rows ) )
    dataset = dataset.remove_columns(["label", "probability"] )
    dataset = dataset.rename_column("prediction" , "label" )
    # Map predicted class ids back to their label names.
    dataset = dataset.map(lambda example: {"label": idalabel[example["label"]]} )
    dataset = dataset.shuffle(seed=args.seed )
    pseudo_labeled_data_file = os.path.join(output_dir , f'train_pseudo.{args.data_file_extension}' )
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file , index=False )
    else:
        dataset.to_json(pseudo_labeled_data_file )
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ , **a__ ) ->Any:
    '''Iterative self-training driver: fine-tune, pseudo-label the inference
    set, retrain, and keep the best iteration by the chosen eval metric.

    NOTE(review): the obfuscation reused ``a__`` for every parameter (a
    SyntaxError) and collapsed distinct assignment targets into
    ``_UpperCamelCase``, so names read later (``accelerator``, ``args``,
    ``data_files``, ``extension``, ``data_dir_format``, ``arguments_dict``,
    ``best_iteration``, ...) are currently unbound.  Restore from the
    upstream source before running.
    '''
    # Presumably `accelerator = Accelerator()`.
    _UpperCamelCase = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
    logger.info(accelerator.state )
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # Merge the three argument dataclasses plus **kwargs into one Namespace
    # (presumably `args`); the `a__` placeholders hide the real values.
    _UpperCamelCase = STModelArguments(model_name_or_path=a__ )
    _UpperCamelCase = STDataArguments(train_file=a__ , infer_file=a__ )
    _UpperCamelCase = STTrainingArguments(output_dir=a__ )
    _UpperCamelCase = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(a__ ).items():
            setattr(a__ , a__ , a__ )
    for key, value in kwargs.items():
        if hasattr(a__ , a__ ):
            setattr(a__ , a__ , a__ )
    # Sanity checks
    _UpperCamelCase = {}
    _UpperCamelCase = None
    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    _UpperCamelCase = args.train_file
    _UpperCamelCase = args.infer_file
    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        _UpperCamelCase = args.eval_file
    # All data files must share one extension, csv or json.
    for key in data_files:
        _UpperCamelCase = data_files[key].split("." )[-1]
        assert extension in ["csv", "json"], f'`{key}_file` should be a csv or a json file.'
        if args.data_file_extension is None:
            _UpperCamelCase = extension
        else:
            assert extension == args.data_file_extension, f'`{key}_file` should be a {args.data_file_extension} file`.'
    assert (
        args.eval_metric in datasets.list_metrics()
    ), f'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed )
    logger.info("Creating the initial data directory for self-training..." )
    # Per-iteration working directory template (presumably `data_dir_format`).
    _UpperCamelCase = f'{args.output_dir}/self-train_iter-{{}}'.format
    _UpperCamelCase = data_dir_format(0 )
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir , exist_ok=a__ )
            os.makedirs(a__ , exist_ok=a__ )
    accelerator.wait_for_everyone()
    # Best-iteration bookkeeping (presumably best_iteration, best_eval_result,
    # early_stopping_patience_counter, should_training_stop).
    _UpperCamelCase = None
    _UpperCamelCase = None
    _UpperCamelCase = 0
    _UpperCamelCase = False
    # Show the progress bar
    _UpperCamelCase = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
    # Self-train
    for iteration in range(0 , int(args.max_selftrain_iterations ) ):
        _UpperCamelCase = data_dir_format(a__ )
        assert os.path.exists(a__ )
        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        _UpperCamelCase = os.path.join(a__ , "stage-1" )
        _UpperCamelCase = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(a__ , a__ ):
                arguments_dict.update({key: value} )
        # Skip stage 1 if a best checkpoint already exists for this iteration.
        _UpperCamelCase = os.path.join(a__ , "best-checkpoint" , a__ )
        if os.path.exists(a__ ):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , a__ , a__ , )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , a__ )
            finetune(**a__ )
            accelerator.wait_for_everyone()
            assert os.path.exists(a__ )
            logger.info("Self-training job completed: iteration: %d, stage: 1." , a__ )
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            _UpperCamelCase = os.path.join(a__ , "best-checkpoint" )
            _UpperCamelCase = os.path.join(a__ , "stage-2" )
            # Update arguments_dict
            _UpperCamelCase = model_path
            _UpperCamelCase = data_files["train"]
            _UpperCamelCase = current_output_dir
            _UpperCamelCase = os.path.join(a__ , "best-checkpoint" , a__ )
            if os.path.exists(a__ ):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , a__ , a__ , )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , a__ )
                finetune(**a__ )
                accelerator.wait_for_everyone()
                assert os.path.exists(a__ )
                logger.info("Self-training job completed: iteration: %d, stage: 2." , a__ )
        _UpperCamelCase = iteration
        _UpperCamelCase = data_dir_format(iteration + 1 )
        # Load the eval score of this iteration's best checkpoint.
        _UpperCamelCase = AutoConfig.from_pretrained(os.path.join(a__ , "best-checkpoint" ) )
        _UpperCamelCase = config.idalabel
        _UpperCamelCase = os.path.join(a__ , "eval_results_best-checkpoint.json" )
        _UpperCamelCase = os.path.join(a__ , "test_results_best-checkpoint.json" )
        assert os.path.exists(a__ )
        with open(a__ , "r" ) as f:
            _UpperCamelCase = float(json.load(a__ )[args.eval_metric] )
        _UpperCamelCase = os.path.join(a__ , "infer_output_best-checkpoint.csv" )
        assert os.path.exists(a__ )
        # Loading the dataset from local csv or json files.
        _UpperCamelCase = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
        _UpperCamelCase = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
        if accelerator.is_main_process:
            os.makedirs(a__ , exist_ok=a__ )
            shutil.copy(a__ , os.path.join(a__ , f'eval_results_iter-{iteration}.json' ) )
            if os.path.exists(a__ ):
                shutil.copy(a__ , os.path.join(a__ , f'test_results_iter-{iteration}.json' ) )
            create_pseudo_labeled_data(a__ , a__ , a__ , a__ , a__ , a__ )
        accelerator.wait_for_everyone()
        _UpperCamelCase = os.path.join(a__ , f'train_pseudo.{args.data_file_extension}' )
        # Early-stopping bookkeeping on the chosen eval metric.
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            _UpperCamelCase = eval_result
            if best_iteration is None:
                _UpperCamelCase = new_iteration
                _UpperCamelCase = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    _UpperCamelCase = new_iteration
                    _UpperCamelCase = new_eval_result
                    _UpperCamelCase = 0
                else:
                    if new_eval_result == best_eval_result:
                        _UpperCamelCase = new_iteration
                        _UpperCamelCase = new_eval_result
                    early_stopping_patience_counter += 1
                if early_stopping_patience_counter >= args.early_stopping_patience:
                    _UpperCamelCase = True
        progress_bar.update(1 )
        if should_training_stop:
            break
    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d" , a__ )
        logger.info("Best evaluation result: %s = %f" , args.eval_metric , a__ )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(a__ , f'eval_results_iter-{iteration}.json' ) , os.path.join(a__ , "eval_results_best-iteration.json" ) , )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
        logger.info("Best evaluation result: %s = %f" , args.eval_metric , a__ )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(a__ , f'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(a__ , "eval_results_best-iteration.json" ) , )
| 82 | import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _UpperCAmelCase ( TestCase ):
    """Code-quality checks run over every dataset script under ./datasets.

    NOTE(review): in the obfuscated original all four methods shared one
    mangled name (so only the last survived) and the test bodies called
    helpers that no longer existed; the helper/test names the bodies actually
    use are restored here, and the base class is the `TestCase` imported above.
    """

    def _no_encoding_on_file_open(self , filepath: str):
        """Return a regex match if *filepath* calls open(...) without an
        explicit encoding (binary and write modes are exempt), else None."""
        with open(filepath , encoding="utf-8") as input_file:
            regexp = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
            return match

    def _no_print_statements(self , filepath: str):
        """Return the first real print(...) call in *filepath*, ignoring
        prints inside comments, string literals and docstrings; else None."""
        with open(filepath , encoding="utf-8") as input_file:
            regexp = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            real_prints = [match for match in matches if match is not None and match.group(1) is not None]
            return real_prints[0] if real_prints else None

    def test_no_encoding_on_file_open(self):
        """Every dataset script must pass an explicit utf-8 encoding to open()."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')

    def test_no_print_statements(self):
        """Dataset scripts must log via datasets.logger, never print()."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
| 82 | 1 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
lowerCamelCase__ = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
lowerCamelCase__ = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
lowerCamelCase__ = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    """BLEU metric backed by the TensorFlow NMT reference implementation.

    NOTE(review): the obfuscated original gave both methods one mangled name
    and reused a single parameter name in `_compute` (a SyntaxError); the
    `_info`/`_compute` hook names required by `datasets.Metric` and the
    parameter names documented in _KWARGS_DESCRIPTION are restored here.
    """

    def _info(self):
        """Declare the metric's metadata and expected input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string" , id="token") , id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string" , id="token") , id="sequence") , id="references"),
                }) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ] , )

    def _compute(self , predictions , references , max_order=4 , smooth=False):
        """Compute corpus BLEU for tokenized predictions against references."""
        score = compute_bleu(
            reference_corpus=references , translation_corpus=predictions , max_order=max_order , smooth=smooth)
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 82 | import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase :
    """Holds the state of a conversation: a UUID, past user inputs, generated
    responses, and one not-yet-processed user input.

    NOTE(review): the obfuscated original reused one parameter name in
    `__init__` (a SyntaxError), lost every `self.<attr> =` target, gave all
    methods one mangled name, and called the non-existent `uuid.uuida`; the
    attribute/method names that `__eq__`/`__repr__` and the conversational
    pipeline actually read are restored here.
    """

    def __init__(self , text=None , conversation_id=None , past_user_inputs=None , generated_responses=None):
        """Create a conversation, minting a UUID when none is supplied."""
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self , other):
        """Two conversations are equal when their UUIDs or full contents match."""
        if not isinstance(other , self.__class__):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self , text , overwrite=False):
        """Stage a new user input; refuse (or overwrite) if one is pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the pending user input into the processed history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self , response):
        """Record a model-generated response."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield (is_user, text) pairs in conversation order, pending input last."""
        for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        """Render the conversation as 'user >> ...' / 'bot >> ...' lines."""
        output = f'Conversation id: {self.uuid} \n'
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f'{name} >> {text} \n'
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS, R'''
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    ''', )
class _UpperCAmelCase ( Pipeline ):
    """Multi-turn conversational pipeline built on the model's `generate`.

    NOTE(review): the obfuscated original reused one parameter name per
    signature (a SyntaxError), shared one mangled name across all hook
    methods, and referenced an undefined base/decorator argument; the
    `Pipeline`/`PIPELINE_INIT_ARGS` imported at the top of this module and
    the hook names the `Pipeline` base class calls are restored here.
    """

    def __init__(self , *args , **kwargs):
        """Initialize and make sure the tokenizer can pad (fall back to EOS)."""
        super().__init__(*args , **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self , min_length_for_response=None , minimum_tokens=None , clean_up_tokenization_spaces=None , **generate_kwargs):
        """Split call kwargs into preprocess / forward / postprocess params."""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self , conversations , num_workers=0 , **kwargs):
        """Run one or several Conversation objects; unwrap a singleton list."""
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs)
        if isinstance(outputs , list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self , conversation , min_length_for_response=32):
        """Tokenize the conversation history plus the pending user input."""
        # Duck-typed check: accept any Conversation-like object carrying the
        # attributes this pipeline reads.
        if not hasattr(conversation , "new_user_input"):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. '
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer , "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self , model_inputs , minimum_tokens=10 , **generate_kwargs):
        """Generate a reply, trimming the prompt to leave room for the response."""
        max_length = generate_kwargs.get("max_length" , self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})')
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            # Encoder-decoder outputs start with the decoder start token.
            start_position = 1
        else:
            # Decoder-only outputs echo the prompt; skip its n tokens.
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self , model_outputs , clean_up_tokenization_spaces=True):
        """Decode the reply and fold it back into the Conversation object."""
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self , conversation):
        """Fallback tokenization: join all turns with EOS, truncate to model max."""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 82 | 1 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( OwlViTImageProcessor ):
    """Deprecated feature-extractor alias for `OwlViTImageProcessor`.

    NOTE(review): the obfuscated original used one name for both *args and
    **kwargs (a SyntaxError), passed an undefined name as the warning
    category, and inherited an undefined base; the `OwlViTImageProcessor`
    imported just above is the intended base class.
    """

    def __init__(self , *args , **kwargs) -> None:
        """Warn about the deprecation, then defer to the image processor."""
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs)
def lowerCAmelCase__ ( length: int = 50 ) ->int:
    '''Project Euler 117: count the ways to fill a row of `length` unit cells
    with any mix of grey unit squares and tiles of length 2, 3 or 4.

    Uses the standard DP: ways[0..length] starts at 1 (the all-grey row) and
    each tile placement adds the count of ways to fill the cells before it.

    >>> lowerCAmelCase__(5)
    15
    '''
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                # Tile occupies [tile_start, tile_start + tile_length); the
                # remaining prefix of size row_length - tile_start - tile_length
                # can be filled independently.
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


# Conventional alias: the demo below (and the original upstream script)
# refers to the function as `solution`.
solution = lowerCAmelCase__

if __name__ == "__main__":
    print(F"{solution() = }")
| 82 | 1 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( default=None , metadata=None ):
    '''Declare a dataclass field whose (possibly mutable) default is produced
    by a factory, avoiding the shared-mutable-default pitfall.

    NOTE(review): the obfuscated original named both parameters `a__` (a
    SyntaxError) while the body read `default`; the names are restored here.
    '''
    # The lambda closes over this call's `default`, so each dataclass instance
    # gets the value captured at field-declaration time.
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class _UpperCAmelCase :
    '''Deprecated benchmarking arguments (models, batch sizes, CSV outputs...).'''

    # NOTE(review): the obfuscation collapsed every field name and type
    # annotation into the mangled `__A`, so only the last assignment survives
    # and none are real dataclass fields; boolean defaults mangled to
    # `lowerCAmelCase` are presumably True/False.  The properties below read
    # `self.models`, `self.multi_process` and `self.is_tpu`, which were among
    # the destroyed field names — restore from the upstream source before use.
    __A = list_field(
        default=[], metadata={
            '''help''': (
                '''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
                ''' of all available models'''
            )
        }, )
    __A = list_field(
        default=[8], metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
    __A = list_field(
        default=[8, 32, 128, 512], metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''}, )
    __A = field(
        default=lowerCAmelCase, metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''}, )
    __A = field(
        default=lowerCAmelCase, metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''}, )
    __A = field(
        default=lowerCAmelCase, metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
    __A = field(default=lowerCAmelCase, metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
    __A = field(default=lowerCAmelCase, metadata={'''help''': '''Benchmark training of model'''} )
    __A = field(default=lowerCAmelCase, metadata={'''help''': '''Verbose memory tracing'''} )
    __A = field(
        default=lowerCAmelCase, metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''}, )
    __A = field(
        default=lowerCAmelCase, metadata={
            '''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
        }, )
    __A = field(default=lowerCAmelCase, metadata={'''help''': '''Trace memory line by line'''} )
    __A = field(default=lowerCAmelCase, metadata={'''help''': '''Save result to a CSV file'''} )
    __A = field(default=lowerCAmelCase, metadata={'''help''': '''Save all print statements in a log file'''} )
    __A = field(default=lowerCAmelCase, metadata={'''help''': '''Whether to print environment information'''} )
    __A = field(
        default=lowerCAmelCase, metadata={
            '''help''': (
                '''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
                ''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
                ''' for debugging / testing and on TPU.'''
            )
        }, )
    # Default CSV / log file names are timestamped at class-definition time.
    __A = field(
        default=F"""inference_time_{round(time() )}.csv""", metadata={'''help''': '''CSV filename used if saving time results to csv.'''}, )
    __A = field(
        default=F"""inference_memory_{round(time() )}.csv""", metadata={'''help''': '''CSV filename used if saving memory results to csv.'''}, )
    __A = field(
        default=F"""train_time_{round(time() )}.csv""", metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''}, )
    __A = field(
        default=F"""train_memory_{round(time() )}.csv""", metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''}, )
    __A = field(
        default=F"""env_info_{round(time() )}.csv""", metadata={'''help''': '''CSV filename used if saving environment information.'''}, )
    __A = field(
        default=F"""log_{round(time() )}.csv""", metadata={'''help''': '''Log filename used if print statements are saved in log.'''}, )
    __A = field(default=3, metadata={'''help''': '''Times an experiment will be run.'''} )
    __A = field(
        default=lowerCAmelCase, metadata={
            '''help''': (
                '''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
                ''' model weights.'''
            )
        }, )
    # NOTE(review): the four methods below also share one mangled name, so
    # only the final property survives; originally they were presumably
    # __post_init__ (deprecation warning), to_json_string, and the `model_names`
    # and `do_multi_processing` properties — confirm against upstream.
    def __UpperCAmelCase ( self : List[Any]) -> List[str]:
        """Emit the deprecation warning for these benchmarking utils."""
        # NOTE(review): `lowercase_` is unbound here (no such parameter) —
        # presumably the warning category (e.g. FutureWarning) was mangled.
        warnings.warn(
            f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models." , lowercase_ , )
    def __UpperCAmelCase ( self : Tuple) -> Tuple:
        """Serialize the arguments to an indented JSON string."""
        return json.dumps(dataclasses.asdict(self) , indent=2)
    @property
    def __UpperCAmelCase ( self : Any) -> List[str]:
        """Return the configured model names, insisting on at least one."""
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased'].")
        return self.models
    @property
    def __UpperCAmelCase ( self : Any) -> Tuple:
        """Whether to measure in a separate process (never on TPU)."""
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| 82 | import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCAmelCase__ ( class_prompt , class_data_dir , num_class_images ) ->int:
    """Download up to `num_class_images` real regularization images for `class_prompt`
    from the LAION-400M retrieval service into `{class_data_dir}/images`, writing
    caption / url / local-path manifests alongside.

    Fixes: the signature used three parameters all named `a__` (a SyntaxError);
    all three manifest file handles were named `fa`, so captions and urls were
    written into the last-opened file; and `class_images[count]` could raise an
    unhandled IndexError once the candidate list was exhausted.
    """
    factor = 1.5
    num_images = int(factor * num_class_images )
    # Oversample the query so enough candidates survive dead links / bad payloads.
    client = ClipClient(
        url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(f'{class_data_dir}/images' , exist_ok=True )
    # Nothing to do when enough images are already on disk.
    if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
        return

    # Grow the query size until the index returns enough candidates (capped at 1e4).
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1e4:
            break
        num_images = int(factor * num_images )
        client = ClipClient(
            url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 , )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images" , total=num_class_images )

    # Three separate manifests: captions, source URLs, and local image paths.
    with open(f'{class_data_dir}/caption.txt' , "w" ) as caption_file, open(
        f'{class_data_dir}/urls.txt' , "w" ) as url_file, open(
        f'{class_data_dir}/images.txt' , "w" ) as path_file:
        # Also stop when the candidate list runs out, instead of indexing past it.
        while total < num_class_images and count < len(class_images ):
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"] )
                if img.status_code == 200:
                    # Opening validates that the payload is a decodable image.
                    Image.open(BytesIO(img.content ) )
                    with open(f'{class_data_dir}/images/{total}.jpg' , "wb" ) as f:
                        f.write(img.content )
                    caption_file.write(images["caption"] + "\n" )
                    url_file.write(images["url"] + "\n" )
                    path_file.write(f'{class_data_dir}/images/{total}.jpg' + "\n" )
                    total += 1
                    pbar.update(1 )
            except Exception:
                # Best effort: skip unreachable URLs and undecodable payloads.
                continue
    return
def lowerCAmelCase__ ( ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = argparse.ArgumentParser("" , add_help=a__ )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=a__ , type=a__ )
parser.add_argument("--class_data_dir" , help="path to save images" , required=a__ , type=a__ )
parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=a__ )
return parser.parse_args()
if __name__ == "__main__":
    # NOTE(review): `parse_args` and `retrieve` are not defined under these names
    # in this file (both helpers above are named `lowerCAmelCase__`), and the
    # parsed namespace is bound to `lowerCamelCase__` while `args` is read below.
    # Confirm the intended entry-point names before running this script.
    lowerCamelCase__ = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 82 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester ( ConfigTester ):
    """Config tester checking MobileNetV2-specific configuration attributes.

    Fixes: the class carried a mangled name/base (`_UpperCAmelCase(lowerCAmelCase)`)
    while the test setUp below instantiates `MobileNetVaConfigTester`, and the
    `hasattr` checks were performed on the undefined name `lowercase_` instead of
    the freshly built config instance.
    """

    def create_and_test_config_common_properties ( self : int) -> List[str]:
        """Instantiate the config and verify MobileNetV2-specific attributes exist."""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config , "tf_padding"))
        self.parent.assertTrue(hasattr(config , "depth_multiplier"))
class MobileNetVaModelTester :
    """Builds small MobileNetV2 configs/inputs and runs shape checks for the tests below.

    Fixes: `__init__` and the check methods declared every parameter as
    `lowercase_` (duplicate parameter names are a SyntaxError), every method was
    named `__UpperCAmelCase` (so later defs shadowed earlier ones), and the class
    name did not match the `MobileNetVaModelTester(self)` call in the test setUp.
    Parameter names/order are recovered from the attribute assignments and the
    default-value sequence in the original signature.
    """

    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , tf_padding=True , hidden_act="relu6" , last_hidden_size=1280 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ) -> int:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        # Finegrained output keeps the full hidden size; otherwise it scales
        # with the depth multiplier.
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs ( self : Optional[Any]) -> Optional[int]:
        """Create random pixel values (and labels when enabled) plus a config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config ( self : Union[str, Any]) -> int:
        """Build a MobileNetV2 config from the tester's hyper-parameters."""
        return MobileNetVaConfig(
            num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )

    def create_and_check_model ( self : Union[str, Any] , config , pixel_values , labels , pixel_labels) -> str:
        """Run the bare model and check hidden-state / pooler output shapes."""
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        self.parent.assertEqual(
            result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )

    def create_and_check_for_image_classification ( self : List[Any] , config , pixel_values , labels , pixel_labels) -> int:
        """Run the classification head and check the logits shape."""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation ( self : str , config , pixel_values , labels , pixel_labels) -> Union[str, Any]:
        """Run the segmentation head (with and without labels) and check logits shape."""
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        result = model(pixel_values , labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def prepare_config_and_inputs_for_common ( self : Dict) -> Union[str, Any]:
        """Adapt prepare_config_and_inputs to the common-test (config, inputs_dict) shape."""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
    """Common model tests for MobileNetV2 (model, classification, segmentation heads).

    NOTE(review): identifiers in this class are mangled -- every test method is
    named `__UpperCAmelCase` (later defs shadow earlier ones, and unittest only
    collects `test_*` names), and `MobileNetVaModelTester` /
    `MobileNetVaConfigTester` are referenced but not defined under those names
    as written. Verify the intended names before relying on these tests.
    """
    # Model classes exercised by the inherited common tests.
    __A = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline mixin.
    __A = (
        {
            '''feature-extraction''': MobileNetVaModel,
            '''image-classification''': MobileNetVaForImageClassification,
            '''image-segmentation''': MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Feature switches consumed by the common ModelTesterMixin.
    __A = False
    __A = False
    __A = False
    __A = False
    def __UpperCAmelCase ( self : int) -> List[str]:
        """Set up the model tester and config tester used by all tests below."""
        _UpperCamelCase = MobileNetVaModelTester(self)
        _UpperCamelCase = MobileNetVaConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_)
    def __UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def __UpperCAmelCase ( self : int) -> Dict:
        """Skipped: vision model, no input embeddings."""
        pass
    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def __UpperCAmelCase ( self : Dict) -> Optional[Any]:
        """Skipped: no embedding layers to get/set."""
        pass
    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def __UpperCAmelCase ( self : int) -> List[str]:
        """Skipped: convolutional model, no attention maps."""
        pass
    def __UpperCAmelCase ( self : int) -> int:
        """Check the forward signature starts with `pixel_values`."""
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCamelCase = model_class(lowercase_)
            _UpperCamelCase = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _UpperCamelCase = [*signature.parameters.keys()]
            _UpperCamelCase = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowercase_)
    def __UpperCAmelCase ( self : Optional[int]) -> Dict:
        """Exercise the bare model shape checks."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_)
    def __UpperCAmelCase ( self : List[str]) -> List[Any]:
        """Check that 16 hidden states are returned, via kwarg and via config."""
        def check_hidden_states_output(lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any]):
            _UpperCamelCase = model_class(lowercase_)
            model.to(lowercase_)
            model.eval()
            with torch.no_grad():
                _UpperCamelCase = model(**self._prepare_for_class(lowercase_ , lowercase_))
            _UpperCamelCase = outputs.hidden_states
            # MobileNetV2 exposes 16 intermediate feature maps.
            _UpperCamelCase = 16
            self.assertEqual(len(lowercase_) , lowercase_)
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCamelCase = True
            check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _UpperCamelCase = True
            check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
    def __UpperCAmelCase ( self : str) -> Optional[int]:
        """Exercise the image-classification head checks."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase_)
    def __UpperCAmelCase ( self : Optional[Any]) -> str:
        """Exercise the semantic-segmentation head checks."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*lowercase_)
    @slow
    def __UpperCAmelCase ( self : Any) -> Dict:
        """Smoke-test loading a pretrained checkpoint from the hub."""
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCamelCase = MobileNetVaModel.from_pretrained(lowercase_)
            self.assertIsNotNone(lowercase_)
def lowerCAmelCase__ ( ) ->List[str]:
    """Load the standard COCO fixture image used by the integration tests.

    Fix: the opened image was bound to `_UpperCamelCase` but the function
    returned the undefined name `image` (a NameError on every call).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration tests comparing pretrained MobileNetV2 outputs to golden values.

    NOTE(review): test methods are mangled to `__UpperCAmelCase` (unittest only
    collects `test_*` names), so these tests will not run as written.
    """
    @cached_property
    def __UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
        """Image processor for the pretrained classification checkpoint (None without vision)."""
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )
    @slow
    def __UpperCAmelCase ( self : Tuple) -> Dict:
        """Classification head: logits shape (1, 1001) and first three golden values."""
        _UpperCamelCase = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(lowercase_)
        _UpperCamelCase = self.default_image_processor
        _UpperCamelCase = prepare_img()
        _UpperCamelCase = image_processor(images=lowercase_ , return_tensors="pt").to(lowercase_)
        # forward pass
        with torch.no_grad():
            _UpperCamelCase = model(**lowercase_)
        # verify the logits
        _UpperCamelCase = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape , lowercase_)
        _UpperCamelCase = torch.tensor([0.24_45, -1.19_93, 0.19_05]).to(lowercase_)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
    @slow
    def __UpperCAmelCase ( self : Tuple) -> List[Any]:
        """Segmentation head: logits shape (1, 21, 65, 65) and a 3x3x3 golden slice."""
        _UpperCamelCase = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        _UpperCamelCase = model.to(lowercase_)
        _UpperCamelCase = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        _UpperCamelCase = prepare_img()
        _UpperCamelCase = image_processor(images=lowercase_ , return_tensors="pt").to(lowercase_)
        # forward pass
        with torch.no_grad():
            _UpperCamelCase = model(**lowercase_)
        _UpperCamelCase = outputs.logits
        # verify the logits
        _UpperCamelCase = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape , lowercase_)
        _UpperCamelCase = torch.tensor(
            [
                [[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]],
                [[-2.15_95, -2.09_77, -2.37_41], [-2.42_26, -2.30_28, -2.68_35], [-2.78_19, -2.59_91, -2.77_06]],
                [[4.20_58, 4.83_17, 4.76_38], [4.41_36, 5.03_61, 4.93_83], [4.50_28, 4.96_44, 4.87_34]],
            ] , device=lowercase_ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1e-4))
| 82 | import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase__ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
lowerCamelCase__ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
lowerCamelCase__ = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    """Accuracy metric for the MATH dataset, canonicalizing LaTeX before comparison.

    NOTE(review): `_DESCRIPTION` / `_KWARGS_DESCRIPTION` / `_CITATION` are
    referenced here but the module constants above are bound to a different
    (mangled) name -- confirm the intended constant names.
    """

    def __UpperCAmelCase ( self : Dict) -> Dict:
        """Return the MetricInfo (string predictions/references, citation, homepage)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )

    def __UpperCAmelCase ( self : Union[str, Any] , predictions : Tuple , references : str) -> Tuple:
        """Compute canonicalized accuracy over paired predictions/references.

        Fixes: both parameters were named `lowercase_` (a SyntaxError), the
        accumulator was bound to `_UpperCamelCase` while `n_correct` was
        incremented undefined, and the undefined `accuracy` was divided/returned.
        """
        n_correct = 0.0
        for pred, ref in zip(predictions , references):
            # math_equivalence canonicalizes LaTeX (e.g. "1/2" vs "\\frac{1}{2}").
            n_correct += 1.0 if math_equivalence.is_equiv(pred , ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 82 | 1 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowerCAmelCase__ ( suffix="" ) ->str:
    """Return a unique, non-existing path (with `suffix`) inside a fresh temp directory.

    Fixes: the original joined onto the (mangled) parameter instead of the
    newly created temp directory, referenced the undefined name `suffix`,
    and called the non-existent `uuid.uuida()` (should be `uuid.uuid4()`).
    """
    directory = tempfile.mkdtemp()
    return os.path.join(directory , str(uuid.uuid4() ) + suffix )
@require_soundfile
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    """Tests for the AgentAudio agent type (tensor <-> wav round-trips).

    NOTE(review): identifiers here are mangled -- `lowercase_` is used but never
    defined in these methods (each intermediate was bound to `_UpperCamelCase`),
    `torch.floataa` is not a real dtype name, and `get_new_path` is not defined
    under that name in this file. Confirm the intended names; the tests cannot
    run as written.
    """
    def __UpperCAmelCase ( self : Optional[int]) -> List[str]:
        """Build AgentAudio from a tensor; check round-trip and file persistence."""
        _UpperCamelCase = torch.rand(12 , dtype=torch.floataa) - 0.5
        _UpperCamelCase = AgentAudio(lowercase_)
        _UpperCamelCase = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1e-4))
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(lowercase_))
        # Ensure that the file contains the same value as the original tensor
        _UpperCamelCase , _UpperCamelCase = sf.read(lowercase_)
        self.assertTrue(torch.allclose(lowercase_ , torch.tensor(lowercase_) , atol=1e-4))
    def __UpperCAmelCase ( self : List[Any]) -> str:
        """Build AgentAudio from a wav file written via soundfile; check round-trip."""
        _UpperCamelCase = torch.rand(12 , dtype=torch.floataa) - 0.5
        _UpperCamelCase = get_new_path(suffix=".wav")
        sf.write(lowercase_ , lowercase_ , 16000)
        _UpperCamelCase = AgentAudio(lowercase_)
        self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1e-4))
        self.assertEqual(agent_type.to_string() , lowercase_)
@require_vision
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    """Tests for the AgentImage agent type (tensor/path/PIL round-trips).

    NOTE(review): identifiers are mangled -- `lowercase_` is read but never
    defined in these methods (intermediates were bound to `_UpperCamelCase`),
    so the tests cannot run as written; confirm the intended variable names.
    """
    def __UpperCAmelCase ( self : Optional[Any]) -> Tuple:
        """Build AgentImage from a tensor; check raw type and file persistence."""
        _UpperCamelCase = torch.randint(0 , 256 , (64, 64, 3))
        _UpperCamelCase = AgentImage(lowercase_)
        _UpperCamelCase = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(lowercase_ , agent_type._tensor , atol=1e-4))
        self.assertIsInstance(agent_type.to_raw() , Image.Image)
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(lowercase_))
    def __UpperCAmelCase ( self : Union[str, Any]) -> Dict:
        """Build AgentImage from a Path; to_string should reuse the same file."""
        _UpperCamelCase = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        _UpperCamelCase = Image.open(lowercase_)
        _UpperCamelCase = AgentImage(lowercase_)
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(lowercase_))
    def __UpperCAmelCase ( self : List[Any]) -> str:
        """Build AgentImage from a PIL image; to_string should write a new file."""
        _UpperCamelCase = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        _UpperCamelCase = Image.open(lowercase_)
        _UpperCamelCase = AgentImage(lowercase_)
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(lowercase_))
class _UpperCAmelCase ( unittest.TestCase ):
    """Tests for the AgentText agent type."""

    def __UpperCAmelCase ( self : Any) -> Any:
        """AgentText must round-trip through to_string/to_raw and compare equal to its str.

        Fixes: the original referenced the undefined names `lowercase_` and
        `agent_type`; the string and wrapper were only ever bound to the
        throwaway name `_UpperCamelCase`, so every assertion raised NameError.
        """
        string = "Hey!"
        agent_type = AgentText(string)
        self.assertEqual(string , agent_type.to_string())
        self.assertEqual(string , agent_type.to_raw())
        self.assertEqual(string , agent_type)
| 82 | import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
# Number of examples for the full-speed test and the small test respectively.
# Fixes: these constants were all bound to the same mangled name
# (`lowerCamelCase__`), while the functions below read SPEED_TEST_N_EXAMPLES /
# SMALL_TEST and the results path read RESULTS_BASEPATH / RESULTS_FILENAME --
# none of which were ever defined.
SPEED_TEST_N_EXAMPLES = 5_0000
SMALL_TEST = 5000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
# Results are written next to this script as results/<script-name>.json.
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def lowerCAmelCase__ ( dataset , length ) ->int:
    """Time reading `length` single examples from `dataset`, one by one.

    Fix: both parameters were named `a__` (duplicate parameter names are a
    SyntaxError); the names `dataset`/`length` are recovered from the kwargs
    dicts used by the benchmark driver ({"length": ...}).
    """
    for i in range(length ):
        _ = dataset[i]
@get_duration
def lowerCAmelCase__ ( dataset , length , batch_size ) ->int:
    """Time reading `dataset` in slices of `batch_size` examples.

    Fix: all three parameters were named `a__` (a SyntaxError); names are
    recovered from the driver kwargs ({"length": ..., "batch_size": ...}).
    `length` is accepted for a uniform call signature but the whole dataset
    is iterated, matching the original `range(0, len(dataset), batch_size)`.
    """
    for i in range(0 , len(dataset ) , batch_size ):
        _ = dataset[i : i + batch_size]
@get_duration
def lowerCAmelCase__ ( dataset , length , type ) ->Union[str, Any]:
    """Time reading `length` single examples with an output format applied.

    Fix: all three parameters were named `a__` (a SyntaxError); names are
    recovered from the driver kwargs ({"type": ..., "length": ...}). The
    parameter must be called `type` (shadowing the builtin) because the
    driver passes it by keyword.
    """
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]
@get_duration
def lowerCAmelCase__ ( dataset , length , batch_size , type ) ->Dict:
    """Time batched reads with an output format applied.

    Fix: all four parameters were named `a__` (a SyntaxError); names are
    recovered from the driver kwargs ({"type", "length", "batch_size"}).
    """
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            _ = dataset[i : i + batch_size]
def lowerCAmelCase__ ( ) ->Dict:
    """Generate a synthetic dataset and benchmark several read patterns.

    NOTE(review): as written this function references `read`, `read_batch`,
    `read_formatted`, `read_formatted_batch` and (at the end) `a__` for the
    results path -- none of which are defined under those names in this file
    (the benchmark functions above are all named `lowerCAmelCase__`, and the
    results path constant is also mangled). Confirm the intended names.
    """
    _UpperCamelCase = {"num examples": SPEED_TEST_N_EXAMPLES}
    # First pass: sequential, batched, and formatted reads on the fresh dataset.
    _UpperCamelCase = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    # Second pass: a reduced set re-run after shuffling (non-contiguous access).
    _UpperCamelCase = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset" )
        _UpperCamelCase = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
        _UpperCamelCase = generate_example_dataset(
            os.path.join(a__ , "dataset.arrow" ) , a__ , num_examples=a__ , seq_shapes={"list": (100,)} , )
        print("first set of iterations" )
        for func, kwargs in functions:
            print(func.__name__ , str(a__ ) )
            _UpperCamelCase = func(a__ , **a__ )
        print("shuffling dataset" )
        _UpperCamelCase = dataset.shuffle()
        print("Second set of iterations (after shuffling" )
        for func, kwargs in functions_shuffled:
            print("shuffled " , func.__name__ , str(a__ ) )
            _UpperCamelCase = func(
                a__ , **a__ )
    # Persist the timing dict as JSON next to this script.
    with open(a__ , "wb" ) as f:
        f.write(json.dumps(a__ ).encode("utf-8" ) )
if __name__ == "__main__":  # useful to run the profiler
    # NOTE(review): `benchmark_iterating` is not defined under this name in this
    # file (the function above is named `lowerCAmelCase__`) -- confirm the
    # intended entry point.
    benchmark_iterating()
| 82 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: maps submodule name -> public names it exports.
# Fixes: the structure dict and each optional-dependency export list were all
# bound to the same mangled name (`lowerCamelCase__`), repeatedly clobbering the
# dict; the final _LazyModule call referenced `_import_structure`, which was
# never bound; and the lazy module was never installed into sys.modules.
_import_structure = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}

# Vision-dependent exports (feature extractor / image processor).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''feature_extraction_deit'''] = ['''DeiTFeatureExtractor''']
    _import_structure['''image_processing_deit'''] = ['''DeiTImageProcessor''']

# PyTorch model exports.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_deit'''] = [
        '''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''DeiTForImageClassification''',
        '''DeiTForImageClassificationWithTeacher''',
        '''DeiTForMaskedImageModeling''',
        '''DeiTModel''',
        '''DeiTPreTrainedModel''',
    ]

# TensorFlow model exports.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_deit'''] = [
        '''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFDeiTForImageClassification''',
        '''TFDeiTForImageClassificationWithTeacher''',
        '''TFDeiTForMaskedImageModeling''',
        '''TFDeiTModel''',
        '''TFDeiTPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the module is lazy.
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
    """Fast tests for the KarrasVe pipeline on a tiny random UNet.

    NOTE(review): identifiers are mangled -- `lowercase_` is used but never
    defined in these methods (intermediates are bound to `_UpperCamelCase`, and
    `pipe.to(...)` / `generator=...` presumably expected `torch_device` / the
    generator). Confirm the intended names; the test cannot run as written.
    """
    @property
    def __UpperCAmelCase ( self : int) -> str:
        """Deterministic tiny 32x32 UNet used as the uncond model."""
        torch.manual_seed(0)
        _UpperCamelCase = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model
    def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
        """2-step inference: dict and tuple outputs must match a golden slice."""
        _UpperCamelCase = self.dummy_uncond_unet
        _UpperCamelCase = KarrasVeScheduler()
        _UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
        pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy").images
        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy" , return_dict=lowercase_)[0]
        _UpperCamelCase = image[0, -3:, -3:, -1]
        _UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test for KarrasVe on the pretrained celebahq-256 UNet.

    NOTE(review): same mangled identifiers as the fast tests above --
    `lowercase_` is read but never defined in this method.
    """
    def __UpperCAmelCase ( self : int) -> Tuple:
        """20-step inference on google/ncsnpp-celebahq-256; compare a golden slice."""
        _UpperCamelCase = "google/ncsnpp-celebahq-256"
        _UpperCamelCase = UNetaDModel.from_pretrained(lowercase_)
        _UpperCamelCase = KarrasVeScheduler()
        _UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
        pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = pipe(num_inference_steps=20 , generator=lowercase_ , output_type="numpy").images
        _UpperCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        _UpperCamelCase = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _UpperCAmelCase ( ChunkPipeline ):
    """Zero-shot object detection pipeline: detect objects in an image given
    free-text candidate labels (OWL-ViT style models).

    Fixes: the decorator/base referenced the undefined mangled names
    `lowerCAmelCase` (they are `PIPELINE_INIT_ARGS` / `ChunkPipeline`, both
    imported above); `__call__` declared three parameters all named `lowercase_`
    (a SyntaxError); every pipeline hook was named `__UpperCAmelCase` so later
    defs shadowed earlier ones and the base-class protocol (preprocess /
    _forward / postprocess / _sanitize_parameters) could never dispatch;
    `check_model_type` was handed the kwargs dict instead of the model mapping;
    and the sort key lambda's parameter did not match its body.
    """

    def __init__( self , **kwargs) -> Any:
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self , "vision")
        # Restrict to models registered for zero-shot object detection.
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__( self , image : Union[str, "Image.Image", List[Dict[str, Any]]] , candidate_labels : Union[str, List[str]] = None , **kwargs , ) -> List[str]:
        """Run detection on `image` for `candidate_labels` (str, comma-joined str, or list)."""
        # Legacy alias for candidate_labels.
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image , (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            # Already a list of {"image": ..., "candidate_labels": ...} dicts.
            inputs = image
        results = super().__call__(inputs , **kwargs)
        return results

    def _sanitize_parameters ( self , **kwargs) -> List[str]:
        """Split caller kwargs into (preprocess, forward, postprocess) params."""
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess ( self , inputs) -> List[str]:
        """Yield one model-input chunk per candidate label."""
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels , str):
            candidate_labels = candidate_labels.split(",")
        # NOTE(review): dtype was the non-existent `torch.intaa`; int32 assumed.
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework)
            image_features = self.image_processor(image , return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward ( self , model_inputs) -> str:
        """Run the model on one chunk, threading bookkeeping fields through."""
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess ( self , model_outputs , threshold=0.1 , top_k=None) -> List[str]:
        """Convert raw detections into sorted {score, label, box} dicts."""
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)
        results = sorted(results , key=lambda x: x["score"] , reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box ( self , box : "torch.Tensor") -> Dict[str, int]:
        """Turn an [xmin, ymin, xmax, ymax] tensor into a dict of ints."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 82 | import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
    """Test suite for the fill-mask pipeline on PyTorch and TensorFlow backends.

    NOTE(review): method and attribute names were restored from the in-file call
    sites (`run_large_test`, `run_pipeline_test`, `run_test_top_k`, ...); the
    previous revision gave every method the same mangled name, so only the last
    definition survived.
    """

    # NOTE(review): attribute names follow the common pipeline-test harness
    # convention (model_mapping / tf_model_mapping) — verify against the mixin.
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self) -> None:
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    @require_tf
    def test_small_model_tf(self) -> None:
        """Pinned outputs of a tiny random model on the TF backend."""
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")

        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_small_model_pt(self) -> None:
        """Pinned outputs of a tiny random model on the PT backend."""
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")

        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        # Two masks in one input -> one list of candidates per mask.
        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )

    @require_torch_gpu
    def test_fp16_casting(self) -> None:
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self) -> None:
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self) -> None:
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)

    def run_large_test(self, unmasker) -> None:
        """Framework-agnostic checks shared by the two large-model tests."""
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_model_no_pad_pt(self) -> None:
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        # Drop the pad token to exercise the unpadded code path.
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self) -> None:
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        # Drop the pad token to exercise the unpadded code path.
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def run_pipeline_test(self, fill_masker, examples) -> None:
        """Structural checks valid for any masked-LM model/tokenizer pair."""
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)

    def run_test_targets(self, model, tokenizer) -> None:
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")

    def run_test_top_k(self, model, tokenizer) -> None:
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        # Constructor-level and call-level top_k must agree.
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def run_test_top_k_targets(self, model, tokenizer) -> None:
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer) -> None:
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer) -> None:
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
| 82 | 1 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    """Builds tiny GPT-NeoX-Japanese configs/inputs and runs shape checks.

    Name restored from the call site in the test class below
    (``GPTNeoXJapaneseModelTester(self)``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random input ids/mask/labels plus a fresh config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask) -> None:
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask) -> None:
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels) -> None:
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask) -> None:
        """Check that cached (past_key_values) decoding matches full re-decoding."""
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """Model and pipeline tests for GPT-NeoX Japanese.

    NOTE(review): base mixins restored from the imports at the top of this file
    (ModelTesterMixin, PipelineTesterMixin); the previous revision referenced the
    undefined name `lowerCAmelCase` twice.
    """

    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self) -> None:
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self) -> None:
        self.config_tester.run_common_tests()

    def test_model(self) -> None:
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self) -> None:
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self) -> None:
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        # No explicit attention mask: the model must build a default one.
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self) -> None:
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self) -> None:
        """Greedy generation on the released 2.7B checkpoint matches pinned outputs."""
        model_id = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 82 | lowerCamelCase__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def lowerCAmelCase__ ( a__ ) ->int:
    """Evaluate a fully parenthesized infix expression with Dijkstra's two-stack algorithm.

    Args:
        a__: the expression string, e.g. ``"(5 + ((4 * 2) * (2 + 3)))"``.
            Only single-digit operands are supported (each digit character is
            pushed individually).

    Returns:
        The integer/float value of the expression.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in a__:
        if i.isdigit():
            # RULE 1: operands go on the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operators go on the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on a closing parenthesis, apply the top operator to the
            # top two operands and push the result back
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            # num2 was pushed first, so it is the left-hand operand
            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5: the final value is the only operand left
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {lowerCAmelCase__(equation)}")
| 82 | 1 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
class Conversation:
    """A conversation and its history: past user inputs, generated responses,
    and the not-yet-processed user input.

    Name restored from the annotations on the pipeline below
    (``Union[Conversation, List[Conversation]]``).
    """

    def __init__(
        self,
        text: str = None,
        conversation_id: uuid.UUID = None,
        past_user_inputs=None,
        generated_responses=None,
    ) -> None:
        # Generate an id when none is supplied so every conversation is addressable.
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        # Text the model has not answered yet.
        self.new_user_input = text

    def __eq__(self, other) -> bool:
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False) -> None:
        """Queue `text` as the next user input; warn (and optionally overwrite) if one is pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            self.new_user_input = text

    def mark_processed(self) -> None:
        """Move the pending user input into the history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str) -> None:
        """Append a model-generated response to the history."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield ``(is_user, text)`` pairs over the whole conversation, oldest first."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self) -> str:
        output = f'Conversation id: {self.uuid} \n'
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f'{name} >> {text} \n'
        return output
@add_end_docstrings(
    lowerCAmelCase, R'''
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    ''', )
class _UpperCAmelCase ( lowerCAmelCase ):
    '''Conversational pipeline: consumes Conversation objects and appends model
    generated responses to them.

    NOTE(review): this block is machine-obfuscated. Several method signatures
    repeat the parameter name `lowercase_` (a SyntaxError as written), and many
    statements rebind the throwaway local `_UpperCamelCase`, discarding the
    value the original stored under a meaningful name; later lines then read
    those meaningful names (`preprocess_params`, `input_ids`, `output_ids`,
    ...) which are undefined here. The docstrings below describe the apparent
    intent — verify against the upstream implementation before executing.
    '''

    def __init__( self : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str) -> List[str]:
        """Initialize the pipeline; when the tokenizer has no pad token, fall back to EOS."""
        super().__init__(*lowercase_ , **lowercase_)
        if self.tokenizer.pad_token_id is None:
            # presumably this assigned self.tokenizer.pad_token — TODO confirm
            _UpperCamelCase = self.tokenizer.eos_token

    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : str) -> Tuple:
        """Split caller kwargs into (preprocess, forward, postprocess) parameter dicts."""
        # three empty parameter dicts — presumably preprocess/forward/postprocess params
        _UpperCamelCase = {}
        _UpperCamelCase = {}
        _UpperCamelCase = {}
        if min_length_for_response is not None:
            _UpperCamelCase = min_length_for_response
        if minimum_tokens is not None:
            _UpperCamelCase = minimum_tokens
        if "max_length" in generate_kwargs:
            _UpperCamelCase = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            _UpperCamelCase = clean_up_tokenization_spaces
        if generate_kwargs:
            # any remaining generate kwargs are forwarded to model.generate
            forward_params.update(lowercase_)
        return preprocess_params, forward_params, postprocess_params

    def __call__( self : Any , lowercase_ : Union[Conversation, List[Conversation]] , lowercase_ : str=0 , **lowercase_ : Union[str, Any]) -> Union[str, Any]:
        """Run the pipeline; unwrap the result list when a single Conversation was given."""
        _UpperCamelCase = super().__call__(lowercase_ , num_workers=lowercase_ , **lowercase_)
        if isinstance(lowercase_ , lowercase_) and len(lowercase_) == 1:
            return outputs[0]
        return outputs

    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Conversation , lowercase_ : Any=32) -> Dict[str, Any]:
        """Preprocess: validate the Conversation and encode it to model input ids."""
        if not isinstance(lowercase_ , lowercase_):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. '
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer , "_build_conversation_input_ids"):
            # tokenizer knows how to serialize a Conversation by itself
            _UpperCamelCase = self.tokenizer._build_conversation_input_ids(lowercase_)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            _UpperCamelCase = self._legacy_parse_and_tokenize(lowercase_)
        if self.framework == "pt":
            _UpperCamelCase = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            _UpperCamelCase = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[int]=10 , **lowercase_ : Dict) -> List[str]:
        """Forward: trim over-long prompts, generate, and slice the prompt off the output."""
        _UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
        # n is presumably the current prompt length in tokens
        _UpperCamelCase = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})')
            # keep only the trailing `trim` tokens so minimum_tokens remain for the reply
            _UpperCamelCase = max_length - minimum_tokens
            _UpperCamelCase = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                _UpperCamelCase = model_inputs["attention_mask"][:, -trim:]
        _UpperCamelCase = model_inputs.pop("conversation")
        _UpperCamelCase = max_length
        _UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
        if self.model.config.is_encoder_decoder:
            # encoder-decoder outputs begin with a single start token
            _UpperCamelCase = 1
        else:
            # decoder-only outputs echo the prompt; skip its n tokens
            _UpperCamelCase = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int=True) -> List[Any]:
        """Postprocess: decode the generated ids and append the reply to the Conversation."""
        _UpperCamelCase = model_outputs["output_ids"]
        _UpperCamelCase = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
        _UpperCamelCase = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(lowercase_)
        return conversation

    def __UpperCAmelCase ( self : Any , lowercase_ : Conversation) -> Dict:
        """Fallback encoder: join every turn with EOS tokens, truncated to the model max length."""
        _UpperCamelCase = self.tokenizer.eos_token_id
        _UpperCamelCase = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_))
        if len(lowercase_) > self.tokenizer.model_max_length:
            # keep only the most recent tokens that fit
            _UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 82 | import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _UpperCAmelCase ( lowerCAmelCase ):
    """Configuration for the BertAbs summarization model.

    ``enc_*`` parameters configure the encoder stack and ``dec_*`` the decoder
    stack; any extra keyword arguments are forwarded to the base config class.
    """

    # model type identifier read by PretrainedConfig-style bases; the
    # obfuscated `__A` binding is kept for backward compatibility.
    model_type = '''bertabs'''
    __A = '''bertabs'''

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        """Store the hyper-parameters on the instance.

        Fixes the obfuscated original, which repeated the parameter name
        ``lowercase_`` (a SyntaxError) and rebound a single throwaway local,
        discarding every argument instead of assigning instance attributes.
        """
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 82 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class _UpperCAmelCase ( lowerCAmelCase ):
    """BERT model configuration: vocabulary, transformer geometry, dropout and
    embedding settings. Extra keyword arguments go to the base config class.
    """

    # model type identifier; the obfuscated `__A` binding is kept for
    # backward compatibility.
    model_type = '''bert'''
    __A = '''bert'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store the hyper-parameters on the instance.

        Fixes the obfuscated original, which repeated the parameter name
        ``lowercase_`` (a SyntaxError) and discarded every argument by
        rebinding a single throwaway local.
        """
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _UpperCAmelCase ( lowerCAmelCase ):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def __UpperCAmelCase ( self : Dict) -> Mapping[str, Mapping[int, str]]:
        """Map each input name to its ``{axis index: axis name}`` dynamic axes.

        Multiple-choice tasks carry an extra ``choice`` axis between batch and
        sequence; all other tasks use plain (batch, sequence) inputs.
        """
        if self.task == "multiple-choice":
            axes = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            axes = {0: "batch", 1: "sequence"}
        input_names = ("input_ids", "attention_mask", "token_type_ids")
        return OrderedDict((name, axes) for name in input_names)
| 82 | from datetime import datetime
import requests

# fix: the HTML parser package is `bs4` (Beautiful Soup 4), not `bsa`
from bs4 import BeautifulSoup

if __name__ == "__main__":
    # original `from datetime import datetime` was lost in the chunk boundary above
    from datetime import datetime

    # Download the og:image of a web page and save it with a timestamped name.
    url = input('''Enter image url: ''').strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, '''html.parser''')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, '''wb''') as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 82 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import scaffolding for the UniSpeech model package: configuration is
# always importable, modeling classes only when torch is present.
# NOTE(review): `_LazyModule` at the bottom is passed `_import_structure`, but
# this chunk only ever binds `lowerCamelCase__` — the two were presumably one
# and the same name before obfuscation; confirm before executing.
lowerCamelCase__ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: list the modeling symbols for lazy export
    lowerCamelCase__ = [
        '''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''UniSpeechForCTC''',
        '''UniSpeechForPreTraining''',
        '''UniSpeechForSequenceClassification''',
        '''UniSpeechModel''',
        '''UniSpeechPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # static type checkers get the real (eager) imports
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )
else:
    import sys

    # at runtime the module is swapped for a lazy loader
    lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( lowerCAmelCase ):
    """DPR (Dense Passage Retrieval) encoder configuration.

    BERT-like transformer geometry plus ``projection_dim`` for the optional
    output projection head. Extra keyword arguments go to the base class.
    """

    # model type identifier; the obfuscated `__A` binding is kept for
    # backward compatibility.
    model_type = '''dpr'''
    __A = '''dpr'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        """Store the hyper-parameters on the instance.

        Fixes the obfuscated original, which repeated the parameter name
        ``lowercase_`` (a SyntaxError) and discarded every argument by
        rebinding a single throwaway local.
        """
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 82 | 1 |
import os
def solution(filename=None):
    """Project Euler 22: total of (alphabetical rank x name score) over all names.

    A name's score is the sum of its letters' alphabet positions (A=1, ...).

    Args:
        filename: optional path to the comma-separated, double-quoted names
            file. Defaults to ``p022_names.txt`` next to this script.

    Returns:
        The total score as an int.

    Fixes the obfuscated original, which referenced the undefined name ``a__``
    (originally ``__file__``) and rebound a throwaway local instead of keeping
    ``name_score``/``total_score`` accumulators.
    """
    if filename is None:
        filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), "p022_names.txt")
    with open(filename) as file:
        names = str(file.readlines()[0])
    names = names.replace("\"", "").split(",")
    names.sort()

    total_score = 0
    for i, name in enumerate(names):
        # alphabet position: ord('A') == 65, so subtract 64
        name_score = sum(ord(letter) - 64 for letter in name)
        total_score += (i + 1) * name_score
    return total_score


# Backward-compatible alias for the obfuscated historical name.
lowerCAmelCase__ = solution

if __name__ == "__main__":
    print(solution())
| 82 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the Table Transformer package: configuration is
# always importable, modeling classes only when torch is present.
# NOTE(review): `_LazyModule` at the bottom is passed `_import_structure`, but
# this chunk only ever binds `lowerCamelCase__` — presumably the same name
# before obfuscation; confirm before executing.
lowerCamelCase__ = {
    '''configuration_table_transformer''': [
        '''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TableTransformerConfig''',
        '''TableTransformerOnnxConfig''',
    ]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: list the modeling symbols for lazy export
    lowerCamelCase__ = [
        '''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TableTransformerForObjectDetection''',
        '''TableTransformerModel''',
        '''TableTransformerPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # static type checkers get the real (eager) imports
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )
else:
    import sys

    # at runtime the module is swapped for a lazy loader
    lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | 1 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint into a PyTorch ``BertForPreTraining`` state dict.

    Fixes the obfuscated original, whose signature repeated ``a__`` three
    times (a SyntaxError) and whose body read names (``config``, ``model``)
    that were never bound.

    Args:
        tf_checkpoint_path: path to the TF checkpoint to read.
        bert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: where to write the converted ``state_dict``.
    """
    # Build the PyTorch model skeleton from the JSON configuration.
    config = BertConfig.from_json_file(bert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)


# Backward-compatible alias for the obfuscated historical name.
lowerCAmelCase__ = convert_tf_checkpoint_to_pytorch

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--bert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained BERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 82 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of output labels per GLUE task, used to size the classification head.
GLUE_TASKS_NUM_LABELS = {
    '''cola''': 2,
    '''mnli''': 3,
    '''mrpc''': 2,
    '''sst-2''': 2,
    '''sts-b''': 1,
    '''qqp''': 2,
    '''qnli''': 2,
    '''rte''': 2,
    '''wnli''': 2,
}
# Backward-compatible alias for the obfuscated historical name.
lowerCamelCase__ = GLUE_TASKS_NUM_LABELS

logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None):
    """Convert a TF XLNet checkpoint into a task-appropriate PyTorch model + config.

    Chooses a sequence-classification, question-answering, or plain LM head
    based on *finetuning_task*, then saves weights and config under
    *pytorch_dump_folder_path*.

    Fixes the obfuscated original, whose signature repeated ``a__`` (a
    SyntaxError) and whose body read names (``config``, ``model``, ...) that
    were never bound.
    """
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}')
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'Save configuration file to {os.path.abspath(pytorch_config_dump_path)}')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


# Backward-compatible alias for the obfuscated historical name.
lowerCAmelCase__ = convert_xlnet_checkpoint_to_pytorch

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--xlnet_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained XLNet model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
    )
    parser.add_argument(
        '''--finetuning_task''',
        default=None,
        type=str,
        help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
    )
    args = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 82 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the BLIP-2 package: configuration and processor
# are always importable, modeling classes only when torch is present.
# NOTE(review): two obfuscation artifacts here — (1) `_LazyModule` at the
# bottom is passed `_import_structure`, but this chunk only ever binds
# `lowerCamelCase__`; (2) the TYPE_CHECKING imports use `blip_a`/`Blipa`
# names where the string table above says `blip_2`/`Blip2` — presumably the
# digit 2 was rewritten to `a`. Confirm both against upstream before running.
lowerCamelCase__ = {
    '''configuration_blip_2''': [
        '''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''Blip2Config''',
        '''Blip2QFormerConfig''',
        '''Blip2VisionConfig''',
    ],
    '''processing_blip_2''': ['''Blip2Processor'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: list the modeling symbols for lazy export
    lowerCamelCase__ = [
        '''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Blip2Model''',
        '''Blip2QFormerModel''',
        '''Blip2PreTrainedModel''',
        '''Blip2ForConditionalGeneration''',
        '''Blip2VisionModel''',
    ]
if TYPE_CHECKING:
    # static type checkers get the real (eager) imports
    from .configuration_blip_a import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlipaConfig,
        BlipaQFormerConfig,
        BlipaVisionConfig,
    )
    from .processing_blip_a import BlipaProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_a import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipaForConditionalGeneration,
            BlipaModel,
            BlipaPreTrainedModel,
            BlipaQFormerModel,
            BlipaVisionModel,
        )
else:
    import sys

    # at runtime the module is swapped for a lazy loader
    lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCAmelCase ( pl.LightningModule ):
    """Minimal LightningModule wrapper: a Longformer encoder plus a 2-label QA head.

    Fixes the obfuscated original, which rebound a throwaway local instead of
    storing ``model``/``num_labels``/``qa_outputs`` on ``self`` (the
    conversion function below reads those attributes).
    """

    def __init__(self, model):
        super().__init__()
        self.model = model  # backbone Longformer encoder
        self.num_labels = 2  # start / end logits
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        """No-op: defined only because pytorch-lightning requires a forward."""
        pass


# Readable alias used by the conversion function below; the obfuscated class
# name above is kept for backward compatibility.
LightningModel = _UpperCAmelCase


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    """Transfer weights from a Lightning QA checkpoint into LongformerForQuestionAnswering.

    Args:
        longformer_model: model identifier of the Longformer backbone.
        longformer_question_answering_ckpt_path: path to the Lightning ckpt.
        pytorch_dump_folder_path: output folder for the converted model.
    """
    # load the backbone and wrap it the way the checkpoint expects
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}')


# Backward-compatible alias for the obfuscated historical name.
lowerCAmelCase__ = convert_longformer_qa_checkpoint_to_pytorch

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--longformer_model''',
        default=None,
        type=str,
        required=True,
        help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
    )
    parser.add_argument(
        '''--longformer_question_answering_ckpt_path''',
        default=None,
        type=str,
        required=True,
        help='''Path the official PyTorch Lightning Checkpoint.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 82 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def get_config(model_name):
    """Build a ``BitConfig`` with ImageNet-1k labels for *model_name*.

    Fixes the obfuscated original: all four defs in this script were named
    ``lowerCAmelCase__`` (each overwriting the previous) and their bodies read
    names that were never bound.
    """
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1_000,
        id2label=id2label,
        label2id=label2id,
    )
    return config


def rename_key(name):
    """Map a timm parameter name onto the transformers Bit naming scheme."""
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name


def prepare_img():
    """Download the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy a timm BiT checkpoint into ``BitForImageClassification`` and verify outputs."""
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remapping parameter names
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor mirroring the timm preprocessing pipeline
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'Pushing model {model_name} and processor to the hub')
        model.push_to_hub(f'ybelkada/{model_name}')
        processor.push_to_hub(f'ybelkada/{model_name}')


# Backward-compatible alias for the obfuscated historical name.
lowerCAmelCase__ = convert_bit_checkpoint

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''resnetv2_50x1_bitm''',
        type=str,
        help='''Name of the BiT timm model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether to push the model to the hub.''',
    )
    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 82 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase ):
    """Deprecated thin wrapper around ``LayoutLMv2ImageProcessor`` kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        """Emit a deprecation warning, then initialize as the image processor.

        Fixes the obfuscated original, which reused one name for ``*args`` and
        ``**kwargs`` (a SyntaxError) and passed that name as the warning
        category; FutureWarning is the category used upstream for this
        deprecation pattern.
        """
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs)
| 82 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class _UpperCAmelCase ( lowerCAmelCase ):
    """Funnel Transformer configuration.

    ``block_sizes``/``block_repeats`` describe the encoder blocks; ``d_*`` and
    ``n_head`` the transformer geometry; the remaining flags control pooling
    and attention variants. Extra keyword arguments go to the base class.

    Fixes the obfuscated original, which bound both the model-type string and
    the attribute map to the same class attribute ``__A`` (the second binding
    clobbered the first) and whose ``__init__`` repeated the parameter name
    ``lowercase_`` (a SyntaxError) while discarding every argument.
    """

    model_type = '''funnel'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        # one repeat per block unless explicitly provided
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f'Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f'Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        """Total encoder layers across all blocks (derived, not settable)."""
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.")

    @property
    def num_blocks(self):
        """Number of encoder blocks (derived, not settable)."""
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
| 82 | import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( enum.Enum ):
    '''
    Output-type selector for the generation pipelines below: raw token ids (0) vs
    decoded text (1). Referenced later as `ReturnType.TENSORS` / `ReturnType.TEXT`.
    '''
    # NOTE(review): both members are named `__A`; reusing a key inside an Enum body
    # raises TypeError at class creation, so the member names were lost to
    # obfuscation (presumably TENSORS and TEXT, per the usage further down).
    __A = 0
    __A = 1
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    '''
    Sequence-to-sequence ("text2text") generation pipeline: tokenizes input text,
    runs `model.generate`, and decodes the generated ids back to text.
    NOTE(review): automated renaming collapsed most locals to `_UpperCamelCase`
    and left references to now-undefined names (`truncation`, `args`, `inputs`,
    `output_ids`, ...), so this block is not runnable as written — restore the
    original identifiers before use.
    '''
    # Key prefix for output dicts ("generated_text" / "generated_token_ids").
    __A = '''generated'''
    def __init__( self : Any , *lowercase_ : Dict , **lowercase_ : Tuple) -> List[Any]:
        """Initialize the base pipeline and verify the model is a seq2seq LM."""
        super().__init__(*lowercase_ , **lowercase_)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=None , lowercase_ : Any=None , lowercase_ : Union[str, Any]=None , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
        """Split call-time kwargs into preprocess/forward/postprocess parameter dicts."""
        _UpperCamelCase = {}
        if truncation is not None:
            _UpperCamelCase = truncation
        _UpperCamelCase = generate_kwargs
        _UpperCamelCase = {}
        if return_tensors is not None and return_type is None:
            _UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            _UpperCamelCase = return_type
        if clean_up_tokenization_spaces is not None:
            _UpperCamelCase = clean_up_tokenization_spaces
        if stop_sequence is not None:
            # Only a single-token stop sequence is honored; warn otherwise.
            _UpperCamelCase = self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
            if len(lowercase_) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            _UpperCamelCase = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def __UpperCAmelCase ( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> Any:
        """Input-length check hook; base class accepts everything (subclasses override)."""
        return True
    def __UpperCAmelCase ( self : Dict , *lowercase_ : List[str] , lowercase_ : List[Any]) -> Tuple:
        """Prepend the model-specific prefix and tokenize a string or list of strings."""
        _UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0] , lowercase_):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            _UpperCamelCase = ([prefix + arg for arg in args[0]],)
            _UpperCamelCase = True
        elif isinstance(args[0] , lowercase_):
            _UpperCamelCase = (prefix + args[0],)
            _UpperCamelCase = False
        else:
            raise ValueError(
                f' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`')
        _UpperCamelCase = self.tokenizer(*lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__( self : List[Any] , *lowercase_ : Any , **lowercase_ : int) -> Dict:
        """Run generation; unwrap single-candidate result lists for list inputs."""
        _UpperCamelCase = super().__call__(*lowercase_ , **lowercase_)
        if (
            isinstance(args[0] , lowercase_)
            and all(isinstance(lowercase_ , lowercase_) for el in args[0])
            and all(len(lowercase_) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str=TruncationStrategy.DO_NOT_TRUNCATE , **lowercase_ : Dict) -> Optional[int]:
        """Preprocess step: tokenize the raw input."""
        _UpperCamelCase = self._parse_and_tokenize(lowercase_ , truncation=lowercase_ , **lowercase_)
        return inputs
    def __UpperCAmelCase ( self : str , lowercase_ : str , **lowercase_ : str) -> str:
        """Forward step: call `model.generate` and reshape ids to (batch, candidates, seq)."""
        if self.framework == "pt":
            _UpperCamelCase , _UpperCamelCase = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            _UpperCamelCase , _UpperCamelCase = tf.shape(model_inputs["input_ids"]).numpy()
        _UpperCamelCase = generate_kwargs.get("min_length" , self.model.config.min_length)
        _UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
        self.check_inputs(lowercase_ , generate_kwargs["min_length"] , generate_kwargs["max_length"])
        _UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
        _UpperCamelCase = output_ids.shape[0]
        if self.framework == "pt":
            _UpperCamelCase = output_ids.reshape(lowercase_ , out_b // in_b , *output_ids.shape[1:])
        elif self.framework == "tf":
            _UpperCamelCase = tf.reshape(lowercase_ , (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : int=ReturnType.TEXT , lowercase_ : int=False) -> Tuple:
        """Postprocess step: decode generated ids to text, or pass token ids through."""
        _UpperCamelCase = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                _UpperCamelCase = {f'{self.return_name}_token_ids': output_ids}
            elif return_type == ReturnType.TEXT:
                _UpperCamelCase = {
                    f'{self.return_name}_text': self.tokenizer.decode(
                        lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
                }
            records.append(lowercase_)
        return records
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    '''
    Summarization pipeline: same flow as the generation pipeline above, but output
    dicts are keyed as "summary_*" and input/output lengths are sanity-checked.
    '''
    __A = '''summary'''
    def __call__( self : Optional[Any] , *lowercase_ : int , **lowercase_ : Dict) -> Optional[int]:
        """Delegate to the base pipeline __call__."""
        return super().__call__(*lowercase_ , **lowercase_)
    def __UpperCAmelCase ( self : List[str] , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> bool:
        """Warn when min/max generation lengths look inconsistent with the input length."""
        # NOTE(review): the body reads `max_length`/`min_length`/`input_length`,
        # but the parameters were all renamed `lowercase_` — restore before use.
        if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be inferior than your max_length={max_length}.')
        if input_length < max_length:
            logger.warning(
                f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})')
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    '''
    Translation pipeline: outputs keyed "translation_*"; supports src/tgt language
    selection, including parsing them out of a "translation_XX_to_YY" task name.
    NOTE(review): bodies reference now-undefined names (`input_length`,
    `max_length`, `src_lang`, `items`, ...) lost to automated renaming.
    '''
    __A = '''translation'''
    def __UpperCAmelCase ( self : Dict , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> int:
        """Warn when the input length is close to the maximum generation length."""
        if input_length > 0.9 * max_length:
            logger.warning(
                f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
                "increasing your max_length manually, e.g. translator('...', max_length=400)")
        return True
    def __UpperCAmelCase ( self : Tuple , *lowercase_ : Any , lowercase_ : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , lowercase_ : Any=None , lowercase_ : Optional[Any]=None) -> List[str]:
        """Prefer the tokenizer's translation-specific input builder when present."""
        if getattr(self.tokenizer , "_build_translation_inputs" , lowercase_):
            return self.tokenizer._build_translation_inputs(
                *lowercase_ , return_tensors=self.framework , truncation=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_)
        else:
            return super()._parse_and_tokenize(*lowercase_ , truncation=lowercase_)
    def __UpperCAmelCase ( self : List[str] , lowercase_ : Dict=None , lowercase_ : str=None , **lowercase_ : List[Any]) -> List[Any]:
        """Extend base parameter sanitization with src/tgt language handling."""
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = super()._sanitize_parameters(**lowercase_)
        if src_lang is not None:
            _UpperCamelCase = src_lang
        if tgt_lang is not None:
            _UpperCamelCase = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            _UpperCamelCase = kwargs.get("task" , self.task)
            _UpperCamelCase = task.split("_")
            if task and len(lowercase_) == 4:
                # translation, XX, to YY
                _UpperCamelCase = items[1]
                _UpperCamelCase = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__( self : List[str] , *lowercase_ : List[str] , **lowercase_ : str) -> Union[str, Any]:
        """Delegate to the base pipeline __call__."""
        return super().__call__(*lowercase_ , **lowercase_)
| 82 | 1 |
from __future__ import annotations
def lowerCAmelCase__ ( a__ ) ->bool:
    """Return True when every element of the sized collection appears exactly once.

    Deduplicating via ``set`` and comparing cardinalities: any duplicate shrinks
    the set below the original length.
    """
    distinct_elements = set(a__)
    return len(distinct_elements) == len(a__)
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 82 | import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
# NOTE(review): every constant below was renamed to the same ``lowerCamelCase__``
# by automated obfuscation; the class body still refers to the original names
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ..., SPIECE_UNDERLINE).
# Local filename expected for the SentencePiece model.
lowerCamelCase__ = {'''vocab_file''': '''spiece.model'''}
# Checkpoint name -> remote SentencePiece vocab file URL.
lowerCamelCase__ = {
    '''vocab_file''': {
        '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
        '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
        '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
        '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
        '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
    }
}
# TODO(PVP) - this should be removed in Transformers v5
# Deprecated per-checkpoint default max input sizes.
lowerCamelCase__ = {
    '''t5-small''': 512,
    '''t5-base''': 512,
    '''t5-large''': 512,
    '''t5-3b''': 512,
    '''t5-11b''': 512,
}
# SentencePiece's meta-symbol marking a word boundary.
lowerCamelCase__ = '''▁'''
class _UpperCAmelCase ( lowerCAmelCase ):
    '''
    SentencePiece-based T5 tokenizer. Wraps an `spm.SentencePieceProcessor` and
    maps `extra_ids` sentinel tokens (`<extra_id_0>`, ...) onto the top of the
    vocabulary, with ids counting down from `vocab_size - 1`.
    NOTE(review): automated renaming collapsed locals to `_UpperCamelCase` and
    class attrs to `__A`; bodies reference the original (now-undefined) names.
    '''
    __A = VOCAB_FILES_NAMES
    __A = PRETRAINED_VOCAB_FILES_MAP
    __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __A = ['''input_ids''', '''attention_mask''']
    def __init__( self : Tuple , lowercase_ : int , lowercase_ : str="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Dict="<pad>" , lowercase_ : Tuple=100 , lowercase_ : str=None , lowercase_ : Optional[Dict[str, Any]] = None , lowercase_ : str=True , **lowercase_ : Optional[Any] , ) -> None:
        """Load the SentencePiece model and register eos/unk/pad plus sentinel tokens."""
        if extra_ids > 0 and additional_special_tokens is None:
            _UpperCamelCase = [f'<extra_id_{i}>' for i in range(lowercase_)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            _UpperCamelCase = len(set(filter(lambda lowercase_: bool("extra_id" in str(lowercase_)) , lowercase_)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")
        if legacy:
            logger.warning_once(
                f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
        _UpperCamelCase = legacy
        _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase_ , **lowercase_ , )
        _UpperCamelCase = vocab_file
        _UpperCamelCase = extra_ids
        _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(lowercase_)
    @staticmethod
    def __UpperCAmelCase ( lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : str) -> Any:
        """Resolve max model length, warning about the deprecated per-checkpoint default."""
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            _UpperCamelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f' {pretrained_model_name_or_path} automatically truncating your input to'
                    f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
                    f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value." , lowercase_ , )
        return max_model_length
    @property
    def __UpperCAmelCase ( self : Dict) -> Optional[int]:
        """Vocabulary size: SentencePiece pieces plus the sentinel (extra-id) tokens."""
        return self.sp_model.get_piece_size() + self._extra_ids
    def __UpperCAmelCase ( self : Dict) -> Optional[int]:
        """Return the full token -> id vocabulary, including added tokens."""
        _UpperCamelCase = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __UpperCAmelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False) -> List[int]:
        """Mask special-token positions: 1 for the trailing eos of each sequence, else 0."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(lowercase_)) + [1]
        return ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1]
    def __UpperCAmelCase ( self : str) -> Dict:
        """Return the sentinel token strings (those matching `<extra_id_N>`)."""
        return list(
            set(filter(lambda lowercase_: bool(re.search(R"<extra_id_\d+>" , lowercase_)) is not None , self.additional_special_tokens)))
    def __UpperCAmelCase ( self : List[Any]) -> Dict:
        """Return the ids of the sentinel tokens."""
        return [self._convert_token_to_id(lowercase_) for token in self.get_sentinel_tokens()]
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[int]) -> List[int]:
        """Append eos_token_id unless already present (warning on the duplicate case)."""
        if len(lowercase_) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def __UpperCAmelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Token-type ids: T5 uses none, so return all zeros over sequence(s) + eos."""
        _UpperCamelCase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a + eos) * [0]
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Build model input: sequence(s) each terminated with eos, then concatenated."""
        _UpperCamelCase = self._add_eos_if_not_present(lowercase_)
        if token_ids_a is None:
            return token_ids_a
        else:
            _UpperCamelCase = self._add_eos_if_not_present(lowercase_)
            return token_ids_a + token_ids_a
    def __getstate__( self : Tuple) -> Any:
        """Drop the unpicklable SentencePiece processor before pickling."""
        _UpperCamelCase = self.__dict__.copy()
        _UpperCamelCase = None
        return state
    def __setstate__( self : Optional[Any] , lowercase_ : Any) -> Optional[int]:
        """Restore state and reload the SentencePiece processor from vocab_file."""
        _UpperCamelCase = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs"):
            _UpperCamelCase = {}
        _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def __UpperCAmelCase ( self : int , lowercase_ : "TextInput" , **lowercase_ : Optional[int]) -> List[str]:
        """Tokenize; in non-legacy mode prefix with SPIECE_UNDERLINE and normalize it."""
        if not self.legacy:
            _UpperCamelCase = SPIECE_UNDERLINE + text.replace(lowercase_ , " ")
        return super().tokenize(lowercase_ , **lowercase_)
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : int , **lowercase_ : Optional[int]) -> List[str]:
        """Run SentencePiece encoding, stripping the artificial leading underline."""
        if not self.legacy:
            _UpperCamelCase = text.startswith(lowercase_)
            if is_first:
                _UpperCamelCase = text[1:]
        _UpperCamelCase = self.sp_model.encode(lowercase_ , out_type=lowercase_)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(lowercase_):
            _UpperCamelCase = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
        """Token -> id; sentinel tokens map to ids counting down from vocab_size - 1."""
        if token.startswith("<extra_id_"):
            _UpperCamelCase = re.match(R"<extra_id_(\d+)>" , lowercase_)
            _UpperCamelCase = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(lowercase_)
    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> int:
        """Id -> token; ids past the SentencePiece range become sentinel tokens."""
        if index < self.sp_model.get_piece_size():
            _UpperCamelCase = self.sp_model.IdToPiece(lowercase_)
        else:
            _UpperCamelCase = f'<extra_id_{self.vocab_size - 1 - index}>'
        return token
    def __UpperCAmelCase ( self : Dict , lowercase_ : Optional[int]) -> Optional[Any]:
        """Join tokens back into a string, decoding special tokens verbatim."""
        _UpperCamelCase = []
        _UpperCamelCase = ""
        _UpperCamelCase = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(lowercase_) + token
                _UpperCamelCase = True
                _UpperCamelCase = []
            else:
                current_sub_tokens.append(lowercase_)
                _UpperCamelCase = False
        out_string += self.sp_model.decode(lowercase_)
        return out_string.strip()
    def __UpperCAmelCase ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
        """Save the SentencePiece model into `save_directory`; return the file path."""
        if not os.path.isdir(lowercase_):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        _UpperCamelCase = os.path.join(
            lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , lowercase_)
        elif not os.path.isfile(self.vocab_file):
            with open(lowercase_ , "wb") as fi:
                _UpperCamelCase = self.sp_model.serialized_model_proto()
                fi.write(lowercase_)
        return (out_vocab_file,)
| 82 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# Pattern stripping English articles during answer normalization
# (referenced later as ``ARTICLES_REGEX`` — name lost to auto-renaming).
lowerCamelCase__ = re.compile(R'''\b(a|an|the)\b''', re.UNICODE)
# Parsed CLI options, populated in the __main__ guard (referenced as ``OPTS``).
lowerCamelCase__ = None
def lowerCAmelCase__ ( ) ->Any:
    """Parse command-line arguments for the SQuAD v2.0 evaluation script.

    Prints help and exits with status 1 when invoked with no arguments.
    Fixes: the parser was bound to a throwaway ``_UpperCamelCase`` name while the
    rest of the body referenced the undefined name ``parser``.
    """
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
    parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
    parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
    parser.add_argument(
        "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
    parser.add_argument(
        "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
    parser.add_argument(
        "--na-prob-thresh" , "-t" , type=float , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
    parser.add_argument(
        "--out-image-dir" , "-p" , metavar="out_images" , default=None , help="Save precision-recall curves to directory." )
    parser.add_argument("--verbose" , "-v" , action="store_true" )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()
def lowerCAmelCase__ ( a__ ) ->dict:
    """Map each question id in the SQuAD dataset to whether it has a gold answer.

    Fixes: the loop iterated an undefined ``dataset`` instead of the parameter,
    per-question results were dropped into a throwaway ``_UpperCamelCase``, and
    the function returned the undefined name ``qid_to_has_ans``.
    """
    qid_to_has_ans = {}
    for article in a__:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                # Non-empty gold text list => answerable question.
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def lowerCAmelCase__ ( a__ ) ->str:
    """Normalize an answer string: lowercase, strip punctuation, articles, whitespace.

    Fixes: every inner helper took an ``a__`` parameter but read the undefined
    names ``text``/``ch``/``exclude`` from the original implementation.
    Relies on the module-level compiled article pattern (``ARTICLES_REGEX``).
    """
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" " , text)
    def white_space_fix(text):
        # Collapse all runs of whitespace to single spaces.
        return " ".join(text.split() )
    def remove_punc(text):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(a__ ) ) ) )
def lowerCAmelCase__ ( a__ ) ->list:
    """Return the whitespace tokens of the normalized answer (empty list for falsy input).

    Fixes: the emptiness check read an undefined ``s`` instead of the parameter.
    """
    if not a__:
        return []
    return normalize_answer(a__).split()
def lowerCAmelCase__ ( a_gold , a_pred ) ->int:
    """Return 1 when gold and predicted answers match after normalization, else 0.

    Fixes: both parameters were named ``a__`` (a SyntaxError); renamed to the
    conventional ``a_gold``/``a_pred``.
    """
    return int(normalize_answer(a_gold) == normalize_answer(a_pred) )
def lowerCAmelCase__ ( a_gold , a_pred ) ->float:
    """Token-level F1 between gold and predicted answers.

    Fixes: duplicate ``a__`` parameters (a SyntaxError) and locals collapsed to
    ``_UpperCamelCase`` while the body read the undefined names
    ``gold_toks``/``pred_toks``.
    """
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    # Multiset intersection counts the overlapping tokens.
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values() )
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def lowerCAmelCase__ ( dataset , preds ) ->tuple:
    """Compute per-question exact-match and F1 score dicts over the dataset.

    Fixes: duplicate ``a__`` parameters (a SyntaxError); score dicts were never
    populated because every assignment targeted the same throwaway name; the
    gold-answer filter tested the whole argument instead of each text ``t``.
    """
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                # Keep only gold answers that survive normalization.
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f'Missing prediction for {qid}' )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred) for a in gold_answers )
                fa_scores[qid] = max(compute_fa(a , a_pred) for a in gold_answers )
    return exact_scores, fa_scores
def lowerCAmelCase__ ( scores , na_probs , qid_to_has_ans , na_prob_thresh ) ->dict:
    """Zero out scores for questions whose no-answer probability exceeds the threshold.

    When the model "predicts no answer" (na_prob above threshold), credit is given
    only if the question truly has no answer. Fixes: duplicate ``a__`` parameters
    (a SyntaxError) and per-qid results dropped into a throwaway local.
    """
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid] )
        else:
            new_scores[qid] = s
    return new_scores
def lowerCAmelCase__ ( exact_scores , fa_scores , qid_list=None ):
    """Aggregate per-question scores into an OrderedDict of percentage metrics.

    When ``qid_list`` is given, averages only over those question ids.
    Fixes: duplicate ``a__`` parameters (a SyntaxError) and ``total`` bound to a
    throwaway name; the bogus ``->Tuple`` annotation (``Tuple`` is not imported
    in this module) is dropped.
    """
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values() ) / total),
                ("f1", 100.0 * sum(fa_scores.values() ) / total),
                ("total", total),
            ] )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
                ("total", total),
            ] )
def lowerCAmelCase__ ( main_eval , new_eval , prefix ) ->None:
    """Copy every metric from ``new_eval`` into ``main_eval`` under ``prefix_<key>``.

    Mutates ``main_eval`` in place. Fixes: duplicate ``a__`` parameters (a
    SyntaxError) and the in-place assignment lost to a throwaway local, which
    made the function a no-op.
    """
    for k in new_eval:
        main_eval[f'{prefix}_{k}'] = new_eval[k]
# NOTE(review): duplicate ``a__`` parameters (a SyntaxError) and positional use
# of ``plt`` — matplotlib is imported only inside the __main__ guard below, so
# this function presumably receives (precisions, recalls, out_image, title);
# restore parameter names before use.
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->str:
    '''Render a step-style precision-recall curve and save it to an image file.'''
    plt.step(a__ , a__ , color="b" , alpha=0.2 , where="post" )
    plt.fill_between(a__ , a__ , step="post" , alpha=0.2 , color="b" )
    plt.xlabel("Recall" )
    plt.ylabel("Precision" )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(a__ )
    plt.savefig(a__ )
    plt.clf()
# NOTE(review): duplicate ``a__`` parameters (a SyntaxError) and locals collapsed
# to ``_UpperCamelCase`` while the body reads the original names
# (``na_probs``, ``qid_to_has_ans``, ``scores``, ``precisions``, ...). The
# statement order encodes the PR-curve sweep; left byte-identical for a careful
# restoration from the upstream script rather than a risky rewrite here.
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ , a__=None , a__=None ) ->List[Any]:
    '''Sweep the no-answer threshold over qids sorted by na_prob, accumulating
    average precision; optionally plot the resulting PR curve.'''
    _UpperCamelCase = sorted(a__ , key=lambda a__ : na_probs[k] )
    _UpperCamelCase = 0.0
    _UpperCamelCase = 1.0
    _UpperCamelCase = 0.0
    _UpperCamelCase = [1.0]
    _UpperCamelCase = [0.0]
    _UpperCamelCase = 0.0
    for i, qid in enumerate(a__ ):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        _UpperCamelCase = true_pos / float(i + 1 )
        _UpperCamelCase = true_pos / float(a__ )
        if i == len(a__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(a__ )
            recalls.append(a__ )
    if out_image:
        plot_pr_curve(a__ , a__ , a__ , a__ )
    return {"ap": 100.0 * avg_prec}
# NOTE(review): duplicate ``a__`` parameters (a SyntaxError); the three PR
# evaluations are dropped into the same throwaway local before the merge_eval
# calls — restore names from the upstream evaluation script before use.
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ , a__ , a__ ) ->Any:
    '''Build exact/F1/oracle precision-recall curves, save the plots, and merge
    the average-precision metrics into the main evaluation dict.'''
    if out_image_dir and not os.path.exists(a__ ):
        os.makedirs(a__ )
    _UpperCamelCase = sum(1 for v in qid_to_has_ans.values() if v )
    if num_true_pos == 0:
        return
    _UpperCamelCase = make_precision_recall_eval(
        a__ , a__ , a__ , a__ , out_image=os.path.join(a__ , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
    _UpperCamelCase = make_precision_recall_eval(
        a__ , a__ , a__ , a__ , out_image=os.path.join(a__ , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
    _UpperCamelCase = {k: float(a__ ) for k, v in qid_to_has_ans.items()}
    _UpperCamelCase = make_precision_recall_eval(
        a__ , a__ , a__ , a__ , out_image=os.path.join(a__ , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
    merge_eval(a__ , a__ , "pr_exact" )
    merge_eval(a__ , a__ , "pr_f1" )
    merge_eval(a__ , a__ , "pr_oracle" )
# NOTE(review): duplicate ``a__`` parameters (a SyntaxError); the body reads the
# lost names ``na_probs``/``name`` and relies on ``plt`` (imported only in the
# __main__ guard) — restore before use.
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->Optional[int]:
    '''Plot a normalized histogram of no-answer probabilities for a subset of qids.'''
    if not qid_list:
        return
    _UpperCamelCase = [na_probs[k] for k in qid_list]
    _UpperCamelCase = np.ones_like(a__ ) / float(len(a__ ) )
    plt.hist(a__ , weights=a__ , bins=20 , range=(0.0, 1.0) )
    plt.xlabel("Model probability of no-answer" )
    plt.ylabel("Proportion of dataset" )
    plt.title(f'Histogram of no-answer probability: {name}' )
    plt.savefig(os.path.join(a__ , f'na_prob_hist_{name}.png' ) )
    plt.clf()
def lowerCAmelCase__ ( preds , scores , na_probs , qid_to_has_ans ) ->tuple:
    """Sweep the no-answer probability threshold and return the best (score%, thresh).

    Starting from the score obtained by answering nothing (one point per truly
    unanswerable question), qids are added in order of increasing na_prob and the
    running score is tracked; the threshold yielding the maximum is returned.
    Fixes: duplicate ``a__`` parameters (a SyntaxError) and locals collapsed to
    ``_UpperCamelCase`` while the body read the lost names.
    """
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs , key=lambda k : na_probs[k] )
    for i, qid in enumerate(qid_list ):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            # Answering an unanswerable question costs a point only if the
            # prediction is non-empty.
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores ), best_thresh
def lowerCAmelCase__ ( main_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans ) ->None:
    """Find best no-answer thresholds for exact and F1 and record them in main_eval.

    Mutates ``main_eval`` in place. Fixes: duplicate ``a__`` parameters (a
    SyntaxError) and the four result assignments lost to a throwaway local.
    """
    best_exact , exact_thresh = find_best_thresh(preds , exact_raw , na_probs , qid_to_has_ans )
    best_fa , fa_thresh = find_best_thresh(preds , fa_raw , na_probs , qid_to_has_ans )
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
# NOTE(review): locals are collapsed to ``_UpperCamelCase`` while later lines
# read the lost names (``dataset_json``, ``preds``, ``na_probs``, ``exact_raw``,
# ``out_eval``, ...); relies on the module-global ``OPTS`` set in the __main__
# guard. Left byte-identical pending restoration from the upstream script.
def lowerCAmelCase__ ( ) ->List[Any]:
    '''Driver: load data/predictions, score them, apply no-answer thresholds,
    aggregate overall/HasAns/NoAns metrics, and write or print the JSON report.'''
    with open(OPTS.data_file ) as f:
        _UpperCamelCase = json.load(a__ )
        _UpperCamelCase = dataset_json["data"]
    with open(OPTS.pred_file ) as f:
        _UpperCamelCase = json.load(a__ )
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            _UpperCamelCase = json.load(a__ )
    else:
        _UpperCamelCase = {k: 0.0 for k in preds}
    _UpperCamelCase = make_qid_to_has_ans(a__ )  # maps qid to True/False
    _UpperCamelCase = [k for k, v in qid_to_has_ans.items() if v]
    _UpperCamelCase = [k for k, v in qid_to_has_ans.items() if not v]
    _UpperCamelCase , _UpperCamelCase = get_raw_scores(a__ , a__ )
    _UpperCamelCase = apply_no_ans_threshold(a__ , a__ , a__ , OPTS.na_prob_thresh )
    _UpperCamelCase = apply_no_ans_threshold(a__ , a__ , a__ , OPTS.na_prob_thresh )
    _UpperCamelCase = make_eval_dict(a__ , a__ )
    if has_ans_qids:
        _UpperCamelCase = make_eval_dict(a__ , a__ , qid_list=a__ )
        merge_eval(a__ , a__ , "HasAns" )
    if no_ans_qids:
        _UpperCamelCase = make_eval_dict(a__ , a__ , qid_list=a__ )
        merge_eval(a__ , a__ , "NoAns" )
    if OPTS.na_prob_file:
        find_all_best_thresh(a__ , a__ , a__ , a__ , a__ , a__ )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(a__ , a__ , a__ , a__ , a__ , OPTS.out_image_dir )
        histogram_na_prob(a__ , a__ , OPTS.out_image_dir , "hasAns" )
        histogram_na_prob(a__ , a__ , OPTS.out_image_dir , "noAns" )
    if OPTS.out_file:
        with open(OPTS.out_file , "w" ) as f:
            json.dump(a__ , a__ )
    else:
        print(json.dumps(a__ , indent=2 ) )
# Entry point: parse CLI args, lazily import matplotlib (headless backend) only
# when plots were requested, then run the evaluation.
# NOTE(review): the parsed options are assigned to ``lowerCamelCase__`` but read
# everywhere as ``OPTS`` — the original global name was lost to auto-renaming.
if __name__ == "__main__":
    lowerCamelCase__ = parse_args()
    if OPTS.out_image_dir:
        import matplotlib
        matplotlib.use('''Agg''')
        import matplotlib.pyplot as plt
    main()
| 82 | from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase__ ( a__ ) ->tuple:
    """Build a ``(getitem, key)`` operation tuple for the HashMap test harness.

    Fixes: the body returned the undefined name ``k`` instead of the parameter.
    """
    return getitem, a__
def lowerCAmelCase__ ( k , v ) ->tuple:
    """Build a ``(setitem, key, value)`` operation tuple for the HashMap test harness.

    Fixes: both parameters were named ``a__`` (a SyntaxError) while the body read
    the undefined names ``k`` and ``v``.
    """
    return setitem, k, v
def lowerCAmelCase__ ( a__ ) ->tuple:
    """Build a ``(delitem, key)`` operation tuple for the HashMap test harness.

    Fixes: the body returned the undefined name ``k`` instead of the parameter.
    """
    return delitem, a__
def lowerCAmelCase__ ( obj , fun , *args ) ->tuple:
    """Apply ``fun(obj, *args)`` and return ``(result, None)``, or ``(None, exc)``
    if any exception is raised.

    Fixes: the parameters were all named ``a__`` (a SyntaxError); renamed so the
    call is unambiguous.
    """
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
# Operation scripts consumed by the parametrized HashMap test below.
# NOTE(review): these reference the helpers ``_set``/``_get``/``_del``, whose
# names were mangled to ``lowerCAmelCase__`` above, and each constant was
# renamed to the same ``lowerCamelCase__`` — restore the original names
# (_add_items, _overwrite_items, _delete_items, ...) before running.
lowerCamelCase__ = (
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
)
lowerCamelCase__ = [
    _set('''key_a''', '''val_a'''),
    _set('''key_a''', '''val_b'''),
]
lowerCamelCase__ = [
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
    _del('''key_a'''),
    _del('''key_b'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
]
lowerCamelCase__ = [
    _get('''key_a'''),
    _del('''key_a'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
    _del('''key_a'''),
    _get('''key_a'''),
]
lowerCamelCase__ = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
lowerCamelCase__ = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('''key_a''', '''val_b'''),
]
# NOTE(review): the decorator ids reference the script constants whose names
# were collapsed to ``lowerCamelCase__`` above, and per-step results are dropped
# into the same throwaway local before the comparison asserts — restore names
# (_add_items, my_res/py_res, my/py) before running.
@pytest.mark.parametrize(
    "operations" , (
        pytest.param(_add_items , id="add items" ),
        pytest.param(_overwrite_items , id="overwrite items" ),
        pytest.param(_delete_items , id="delete items" ),
        pytest.param(_access_absent_items , id="access absent items" ),
        pytest.param(_add_with_resize_up , id="add with resize up" ),
        pytest.param(_add_with_resize_down , id="add with resize down" ),
    ) , )
def lowerCAmelCase__ ( a__ ) ->Dict:
    '''Replay each operation script against both HashMap and the builtin dict,
    asserting identical results, errors, and final contents.'''
    _UpperCamelCase = HashMap(initial_block_size=4 )
    _UpperCamelCase = {}
    for _, (fun, *args) in enumerate(a__ ):
        _UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
        _UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
        assert my_res == py_res
        assert str(a__ ) == str(a__ )
        assert set(a__ ) == set(a__ )
        assert len(a__ ) == len(a__ )
        assert set(my.items() ) == set(py.items() )
def lowerCAmelCase__ ( ) ->None:
    """Check HashMap exposes no public names beyond those of the builtin dict.

    Fixes: the inner predicate took ``a__`` but read the undefined ``name``; the
    two comprehension results were bound to the same throwaway local and then
    read under the lost names; the ``->List[Any]`` annotation referenced an
    unimported name.
    """
    def is_public(name ) -> bool:
        # Public API = any attribute not underscore-prefixed.
        return not name.startswith("_" )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    # Strict superset: dict has everything HashMap has, and more.
    assert dict_public_names > hash_public_names
| 82 | 1 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class _UpperCAmelCase ( lowerCAmelCase ):
    '''ConfigTester variant asserting SegFormer-specific config attributes exist.'''
    def __UpperCAmelCase ( self : Tuple) -> Tuple:
        """Instantiate the config from `inputs_dict` and check expected attributes."""
        # NOTE(review): the constructed config is bound to a throwaway local while
        # the asserts receive `lowercase_` — names lost to auto-renaming.
        _UpperCamelCase = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(lowercase_ , "hidden_sizes"))
        self.parent.assertTrue(hasattr(lowercase_ , "num_attention_heads"))
        self.parent.assertTrue(hasattr(lowercase_ , "num_encoder_blocks"))
class _UpperCAmelCase :
'''simple docstring'''
    def __init__( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple=13 , lowercase_ : List[Any]=64 , lowercase_ : List[Any]=3 , lowercase_ : Optional[Any]=4 , lowercase_ : Dict=[2, 2, 2, 2] , lowercase_ : Optional[Any]=[8, 4, 2, 1] , lowercase_ : str=[16, 32, 64, 128] , lowercase_ : Optional[Any]=[1, 4, 8, 16] , lowercase_ : Tuple=[1, 2, 4, 8] , lowercase_ : List[str]=True , lowercase_ : int=True , lowercase_ : Optional[int]="gelu" , lowercase_ : Dict=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : str=0.02 , lowercase_ : Optional[int]=3 , lowercase_ : List[str]=None , ) -> List[Any]:
        """Store the model-tester hyperparameters used to build tiny SegFormer configs."""
        # NOTE(review): every assignment targets the same throwaway local — the
        # original attribute names (self.parent, self.batch_size, ...) were lost.
        _UpperCamelCase = parent
        _UpperCamelCase = batch_size
        _UpperCamelCase = image_size
        _UpperCamelCase = num_channels
        _UpperCamelCase = num_encoder_blocks
        _UpperCamelCase = sr_ratios
        _UpperCamelCase = depths
        _UpperCamelCase = hidden_sizes
        _UpperCamelCase = downsampling_rates
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = is_training
        _UpperCamelCase = use_labels
        _UpperCamelCase = hidden_act
        _UpperCamelCase = hidden_dropout_prob
        _UpperCamelCase = attention_probs_dropout_prob
        _UpperCamelCase = initializer_range
        _UpperCamelCase = num_labels
        _UpperCamelCase = scope
def __UpperCAmelCase ( self : Any) -> Dict:
"""simple docstring"""
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : Any , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : str) -> List[str]:
"""simple docstring"""
_UpperCamelCase = SegformerModel(config=lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(lowercase_)
_UpperCamelCase = _UpperCamelCase = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
def __UpperCAmelCase ( self : List[Any] , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : Dict) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = self.num_labels
_UpperCamelCase = SegformerForSemanticSegmentation(lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
_UpperCamelCase = model(lowercase_ , labels=lowercase_)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
self.parent.assertGreater(result.loss , 0.0)
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : int) -> str:
"""simple docstring"""
_UpperCamelCase = 1
_UpperCamelCase = SegformerForSemanticSegmentation(config=lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size)).to(lowercase_)
_UpperCamelCase = model(lowercase_ , labels=lowercase_)
self.parent.assertGreater(result.loss , 0.0)
def __UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
    """SegFormer model tests: common harness checks plus attention/hidden-state shape tests.

    NOTE(review): renaming damage — the mixin bases are both ``lowerCAmelCase``
    (undefined); all class attributes share the name ``__A`` so only the last
    binding survives; all test methods share the name ``__UpperCAmelCase`` so only
    the last definition survives and unittest discovers none of them; bodies read
    names (``config``, ``inputs_dict``, ``model``, ``outputs``, ``attentions``,
    ``arg_names``, ...) that the ``_UpperCamelCase`` assignments never bind; and
    helper classes are referenced by their original names (``SegformerModelTester``,
    ``SegformerConfigTester``) which no longer exist in this file.
    """

    __A = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    __A = (
        {
            '''feature-extraction''': SegformerModel,
            '''image-classification''': SegformerForImageClassification,
            '''image-segmentation''': SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    __A = True
    __A = False
    __A = False
    __A = False

    def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
        """Set up the model tester and config tester (class names need restoring — see class note)."""
        _UpperCamelCase = SegformerModelTester(self)
        _UpperCamelCase = SegformerConfigTester(self , config_class=lowercase_)

    def __UpperCAmelCase ( self : Tuple) -> List[str]:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    def __UpperCAmelCase ( self : str) -> Tuple:
        """Shape-check the bare model."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_)

    def __UpperCAmelCase ( self : Dict) -> List[str]:
        """Check the single-label (binary) segmentation path."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*lowercase_)

    def __UpperCAmelCase ( self : Any) -> Any:
        """Check the multi-label semantic segmentation path."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*lowercase_)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def __UpperCAmelCase ( self : Any) -> str:
        """Skipped: SegFormer consumes pixel values, not input embeddings."""
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def __UpperCAmelCase ( self : str) -> List[str]:
        """Skipped: no input/output embedding accessors on this architecture."""
        pass

    def __UpperCAmelCase ( self : Tuple) -> str:
        """Every model's forward signature must start with ``pixel_values``."""
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCamelCase = model_class(lowercase_)
            _UpperCamelCase = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _UpperCamelCase = [*signature.parameters.keys()]
            _UpperCamelCase = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowercase_)

    def __UpperCAmelCase ( self : Optional[int]) -> List[str]:
        """Check attention outputs: count equals total depth; verify first/last block
        attention shapes (SegFormer reduces the key/value sequence by ``sr_ratios``)."""
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        _UpperCamelCase = True
        for model_class in self.all_model_classes:
            _UpperCamelCase = True
            _UpperCamelCase = False
            _UpperCamelCase = True
            _UpperCamelCase = model_class(lowercase_)
            model.to(lowercase_)
            model.eval()
            with torch.no_grad():
                _UpperCamelCase = model(**self._prepare_for_class(lowercase_ , lowercase_))
            _UpperCamelCase = outputs.attentions
            # One attention map per transformer layer, summed over all stages.
            _UpperCamelCase = sum(self.model_tester.depths)
            self.assertEqual(len(lowercase_) , lowercase_)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            _UpperCamelCase = True
            _UpperCamelCase = model_class(lowercase_)
            model.to(lowercase_)
            model.eval()
            with torch.no_grad():
                _UpperCamelCase = model(**self._prepare_for_class(lowercase_ , lowercase_))
            _UpperCamelCase = outputs.attentions
            self.assertEqual(len(lowercase_) , lowercase_)
            # verify the first attentions (first block, first layer)
            _UpperCamelCase = (self.model_tester.image_size // 4) ** 2
            _UpperCamelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
            # verify the last attentions (last block, last layer)
            _UpperCamelCase = (self.model_tester.image_size // 32) ** 2
            _UpperCamelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
            _UpperCamelCase = len(lowercase_)
            # Check attention is always last and order is fine
            _UpperCamelCase = True
            _UpperCamelCase = True
            _UpperCamelCase = model_class(lowercase_)
            model.to(lowercase_)
            model.eval()
            with torch.no_grad():
                _UpperCamelCase = model(**self._prepare_for_class(lowercase_ , lowercase_))
            self.assertEqual(out_len + 1 , len(lowercase_))
            _UpperCamelCase = outputs.attentions
            self.assertEqual(len(lowercase_) , lowercase_)
            # verify the first attentions (first block, first layer)
            _UpperCamelCase = (self.model_tester.image_size // 4) ** 2
            _UpperCamelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )

    def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
        """Check hidden-state outputs: one per encoder block; first block at 1/4 resolution."""
        def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Optional[int]):
            _UpperCamelCase = model_class(lowercase_)
            model.to(lowercase_)
            model.eval()
            with torch.no_grad():
                _UpperCamelCase = model(**self._prepare_for_class(lowercase_ , lowercase_))
            _UpperCamelCase = outputs.hidden_states
            _UpperCamelCase = self.model_tester.num_encoder_blocks
            self.assertEqual(len(lowercase_) , lowercase_)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]) , [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCamelCase = True
            check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _UpperCamelCase = True
            check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)

    def __UpperCAmelCase ( self : Any) -> Optional[Any]:
        """Training smoke test: forward with labels and backprop the loss for each trainable class."""
        if not self.model_tester.is_training:
            return
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        _UpperCamelCase = True
        for model_class in self.all_model_classes:
            # Skip base models that have no training head.
            if model_class in get_values(lowercase_):
                continue
            _UpperCamelCase = model_class(lowercase_)
            model.to(lowercase_)
            model.train()
            _UpperCamelCase = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
            _UpperCamelCase = model(**lowercase_).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def __UpperCAmelCase ( self : Any) -> Union[str, Any]:
        """Skipped: model too large for the common test in question."""
        pass

    @slow
    def __UpperCAmelCase ( self : Dict) -> Optional[Any]:
        """Slow: load the first pretrained checkpoint and check it instantiates."""
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCamelCase = SegformerModel.from_pretrained(lowercase_)
            self.assertIsNotNone(lowercase_)
def lowerCAmelCase__ ( ) ->Union[str, Any]:
    """Load the COCO two-cats test fixture and return it as a PIL image."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration tests: run pretrained SegFormer checkpoints on a fixture image.

    NOTE(review): renaming damage — all three methods share the name
    ``__UpperCAmelCase`` (only the last survives; unittest discovers none), and
    keyword values such as ``keep_ratio=lowercase_`` reference an undefined name
    (presumably ``False`` originally — confirm against upstream).
    """

    @slow
    def __UpperCAmelCase ( self : List[Any]) -> List[str]:
        """b0 ADE20k checkpoint: check logits shape (1, num_labels, 128, 128) and a 3x3x3 value slice."""
        _UpperCamelCase = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=lowercase_ , align=lowercase_ , do_random_crop=lowercase_)
        _UpperCamelCase = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            lowercase_)
        _UpperCamelCase = prepare_img()
        _UpperCamelCase = image_processor(images=lowercase_ , return_tensors="pt")
        _UpperCamelCase = encoded_inputs.pixel_values.to(lowercase_)
        with torch.no_grad():
            _UpperCamelCase = model(lowercase_)
        _UpperCamelCase = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape , lowercase_)
        # Reference values recorded from the pretrained checkpoint.
        _UpperCamelCase = torch.tensor(
            [
                [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
                [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
                [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
            ]).to(lowercase_)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowercase_ , atol=1e-4))

    @slow
    def __UpperCAmelCase ( self : Tuple) -> List[str]:
        """b1 Cityscapes checkpoint: same shape check with a looser tolerance."""
        _UpperCamelCase = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=lowercase_ , align=lowercase_ , do_random_crop=lowercase_)
        _UpperCamelCase = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024").to(lowercase_)
        _UpperCamelCase = prepare_img()
        _UpperCamelCase = image_processor(images=lowercase_ , return_tensors="pt")
        _UpperCamelCase = encoded_inputs.pixel_values.to(lowercase_)
        with torch.no_grad():
            _UpperCamelCase = model(lowercase_)
        _UpperCamelCase = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape , lowercase_)
        _UpperCamelCase = torch.tensor(
            [
                [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
                [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
                [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
            ]).to(lowercase_)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowercase_ , atol=1e-1))

    @slow
    def __UpperCAmelCase ( self : Dict) -> Optional[int]:
        """Post-processing: semantic maps resized to target_sizes, or kept at logit resolution."""
        _UpperCamelCase = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=lowercase_ , align=lowercase_ , do_random_crop=lowercase_)
        _UpperCamelCase = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            lowercase_)
        _UpperCamelCase = prepare_img()
        _UpperCamelCase = image_processor(images=lowercase_ , return_tensors="pt")
        _UpperCamelCase = encoded_inputs.pixel_values.to(lowercase_)
        with torch.no_grad():
            _UpperCamelCase = model(lowercase_)
        _UpperCamelCase = outputs.logits.detach().cpu()
        _UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=lowercase_ , target_sizes=[(500, 300)])
        _UpperCamelCase = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape , lowercase_)
        _UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=lowercase_)
        _UpperCamelCase = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape , lowercase_)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
    """Fast unit tests for the Kandinsky 2.2 controlnet img2img pipeline with tiny dummy components.

    NOTE(review): renaming damage — the mixin base ``lowerCAmelCase`` is undefined;
    all class attributes share the name ``__A`` and all properties/tests share
    ``__UpperCAmelCase``, so only the last binding of each survives; bodies read
    names (``unet``, ``scheduler``, ``movq``, ``image_embeds``, ``hint``,
    ``generator``, ``pipe``, ...) that the ``_UpperCamelCase`` assignments never
    bind; ``np.uinta`` and ``self.block_out_channels_a`` are garbled references.
    """

    __A = KandinskyVaaControlnetImgaImgPipeline
    __A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    __A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    # NOTE(review): ``guidance_scale`` and ``return_dict`` appear twice below —
    # presumably duplication damage; confirm against the upstream parameter list.
    __A = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    __A = False

    @property
    def __UpperCAmelCase ( self : List[Any]) -> Tuple:
        """Text-embedder hidden size for the dummy components."""
        return 32

    @property
    def __UpperCAmelCase ( self : Tuple) -> Tuple:
        """Time-embedding input dimension."""
        return 32

    @property
    def __UpperCAmelCase ( self : Optional[int]) -> str:
        """Cross-attention dimension (same as the time input dim)."""
        return self.time_input_dim

    @property
    def __UpperCAmelCase ( self : List[str]) -> Any:
        """Time-projection dimension (4x the time input dim)."""
        return self.time_input_dim * 4

    @property
    def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
        """Number of inference steps used in slow paths."""
        return 100

    @property
    def __UpperCAmelCase ( self : Dict) -> List[Any]:
        """Tiny UNet with the image_hint conditioning used by the controlnet pipeline."""
        torch.manual_seed(0)
        _UpperCamelCase = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        _UpperCamelCase = UNetaDConditionModel(**lowercase_)
        return model

    @property
    def __UpperCAmelCase ( self : int) -> Optional[int]:
        """Config kwargs for the tiny MoVQ (VQ-VAE) decoder."""
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def __UpperCAmelCase ( self : int) -> Dict:
        """Instantiate the tiny MoVQ model (seeded for determinism)."""
        torch.manual_seed(0)
        _UpperCamelCase = VQModel(**self.dummy_movq_kwargs)
        return model

    def __UpperCAmelCase ( self : int) -> Any:
        """Assemble the pipeline components: dummy UNet, DDIM scheduler, dummy MoVQ."""
        _UpperCamelCase = self.dummy_unet
        _UpperCamelCase = self.dummy_movq
        _UpperCamelCase = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.0_00_85,
            "beta_end": 0.0_12,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        _UpperCamelCase = DDIMScheduler(**lowercase_)
        _UpperCamelCase = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def __UpperCAmelCase ( self : str , lowercase_ : Dict , lowercase_ : List[str]=0) -> List[str]:
        """Build deterministic dummy pipeline inputs: embeds, a 256x256 init image, a hint, a seeded generator."""
        _UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase_)).to(lowercase_)
        _UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
            lowercase_)
        # create init_image
        _UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
        _UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
        _UpperCamelCase = Image.fromarray(np.uinta(lowercase_)).convert("RGB").resize((256, 256))
        # create hint
        _UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
        if str(lowercase_).startswith("mps"):
            # MPS generators must live on CPU.
            _UpperCamelCase = torch.manual_seed(lowercase_)
        else:
            _UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(lowercase_)
        _UpperCamelCase = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def __UpperCAmelCase ( self : Any) -> str:
        """Run the pipeline on CPU and compare a 3x3 output slice against recorded reference values."""
        _UpperCamelCase = "cpu"
        _UpperCamelCase = self.get_dummy_components()
        _UpperCamelCase = self.pipeline_class(**lowercase_)
        _UpperCamelCase = pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = pipe(**self.get_dummy_inputs(lowercase_))
        _UpperCamelCase = output.images
        _UpperCamelCase = pipe(
            **self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
        _UpperCamelCase = image[0, -3:, -3:, -1]
        _UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _UpperCamelCase = np.array(
            [0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow GPU integration test: full Kandinsky 2.2 controlnet img2img run against a recorded output.

    NOTE(review): renaming damage — both methods share the name
    ``__UpperCAmelCase``; the first one calls ``super().tearDown()`` and frees GPU
    memory, so it was presumably the ``tearDown`` override (unittest will never
    call it under this name); bodies read names (``init_image``, ``hint``,
    ``pipe_prior``, ``pipeline``, ``prompt``, ``generator``, ...) that the
    ``_UpperCamelCase`` assignments never bind.
    """

    def __UpperCAmelCase ( self : Union[str, Any]) -> int:
        """Free GPU memory between tests (intended as tearDown — see class note)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCAmelCase ( self : Optional[int]) -> Any:
        """Run prior + controlnet img2img on the cat fixture and compare against the recorded image."""
        _UpperCamelCase = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
        _UpperCamelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        _UpperCamelCase = init_image.resize((512, 512))
        _UpperCamelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        # Normalize the depth hint to [0, 1] and shape it as (1, C, H, W).
        _UpperCamelCase = torch.from_numpy(np.array(lowercase_)).float() / 2_55.0
        _UpperCamelCase = hint.permute(2 , 0 , 1).unsqueeze(0)
        _UpperCamelCase = "A robot, 4k photo"
        _UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
        pipe_prior.to(lowercase_)
        _UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
        _UpperCamelCase = pipeline.to(lowercase_)
        pipeline.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = torch.Generator(device="cpu").manual_seed(0)
        _UpperCamelCase , _UpperCamelCase = pipe_prior(
            lowercase_ , image=lowercase_ , strength=0.85 , generator=lowercase_ , negative_prompt="" , ).to_tuple()
        _UpperCamelCase = pipeline(
            image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , hint=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
        _UpperCamelCase = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(lowercase_ , lowercase_)
# (corrupted chunk-separator artifact removed)
def lowerCAmelCase__ ( a__ ) ->int:
    """Return ``a__!`` (factorial; 0! == 1! == 1).

    Bug fix: the original recursed through the undefined name ``factorial``
    (NameError at runtime), and this module later rebinds ``lowerCAmelCase__``
    itself, so self-recursion by name would be fragile too — compute the
    product iteratively instead.
    """
    _UpperCamelCase = 1
    for _factor in range(2 , a__ + 1):
        _UpperCamelCase *= _factor
    return _UpperCamelCase
def lowerCAmelCase__ ( a__ ) ->bool:
    """Return True iff ``a__`` is a Krishnamurthy number, i.e. it equals the sum
    of the factorials of its decimal digits (e.g. 145 = 1! + 4! + 5!).

    Bug fixes: the original called the undefined name ``factorial`` (NameError)
    and read the global ``number`` instead of its own argument, so it ignored
    its input entirely.

    Note: like the original loop, ``a__ == 0`` returns True (the digit loop
    never runs), a degenerate case by this definition.
    """
    from math import factorial  # local import so this fix stands alone

    _UpperCamelCase = 0
    _UpperCamelCase_remaining = a__
    while _UpperCamelCase_remaining > 0:
        _UpperCamelCase_remaining , _digit = divmod(_UpperCamelCase_remaining , 10)
        _UpperCamelCase += factorial(_digit)
    return _UpperCamelCase == a__
if __name__ == "__main__":
print('''Program to check whether a number is a Krisnamurthy Number or not.''')
lowerCamelCase__ = int(input('''Enter number: ''').strip())
print(
F"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
)
def lowerCAmelCase__ ( a__ ) ->int:
    """Return the number of distinct ways to climb ``a__`` stairs taking one or
    two steps at a time (the Fibonacci-style "climbing stairs" count).

    Raises:
        ValueError: if ``a__`` is not a positive integer.

    Bug fixes: the def line carried corrupted prefix text; validation used
    ``assert`` (stripped under ``python -O``) with the nonsensical test
    ``isinstance(a__, a__)`` and the undefined name ``number_of_steps``; and
    the loop read ``current``/``previous`` which the obfuscated assignments
    never bound.
    """
    if not isinstance(a__ , int) or a__ <= 0:
        raise ValueError(f'number_of_steps needs to be positive integer, your input {a__}')
    if a__ == 1:
        return 1
    previous , current = 1 , 1
    for _ in range(a__ - 1):
        previous , current = current , current + previous
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
# (corrupted chunk-separator artifact removed)
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def lowerCAmelCase__ ( a__ = True , *args , **kwargs ) ->Union[str, Any]:
    """Wrapper around ``tqdm.auto.tqdm`` that, by default, only shows the
    progress bar on the local main process.

    Args:
        a__ (`bool`, defaults to `True`): when True, disable the bar on every
            process except local rank 0.

    Raises:
        ImportError: if ``tqdm`` is not installed.

    Bug fixes: the original signature reused ``a__`` for the positional and
    keyword catch-alls (duplicate argument names are a SyntaxError) and read
    the undefined name ``main_process_only``; it also disabled the bar *on*
    the main process (``local_process_index == 0``) instead of on every other
    process — the upstream accelerate behavior is ``!= 0``.
    """
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    _UpperCamelCase = False
    if a__:
        # Disable everywhere except the local main process (rank 0).
        _UpperCamelCase = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=_UpperCamelCase)
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Zero-shot object detection pipeline: detects objects named by free-text
    candidate labels in an image, one candidate label per chunk.

    NOTE(review): mechanical renaming has damaged this class — several methods
    repeat the parameter name ``lowercase_`` (duplicate argument names are a
    SyntaxError), bodies read the original names (``image``,
    ``candidate_labels``, ``inputs``, ``model_outputs``, ``threshold``,
    ``top_k``, ...) that are never bound, and ``__init__`` passes the undefined
    ``lowercase_`` to ``check_model_type`` (presumably the zero-shot OD model
    mapping originally). Restore before use.
    """

    def __init__( self : Union[str, Any] , **lowercase_ : Tuple) -> Any:
        """Initialize the pipeline; PyTorch-only, requires the vision backend."""
        super().__init__(**lowercase_)
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self , "vision")
        self.check_model_type(lowercase_)

    def __call__( self : str , lowercase_ : Union[str, "Image.Image", List[Dict[str, Any]]] , lowercase_ : Union[str, List[str]] = None , **lowercase_ : str , ) -> List[str]:
        """Run detection on an image (or list of {image, candidate_labels} dicts)."""
        # Back-compat: older callers passed the labels as ``text_queries``.
        if "text_queries" in kwargs:
            _UpperCamelCase = kwargs.pop("text_queries")
        if isinstance(lowercase_ , (str, Image.Image)):
            _UpperCamelCase = {"image": image, "candidate_labels": candidate_labels}
        else:
            _UpperCamelCase = image
        _UpperCamelCase = super().__call__(lowercase_ , **lowercase_)
        return results

    def __UpperCAmelCase ( self : Any , **lowercase_ : int) -> List[str]:
        """Split kwargs into (preprocess, forward, postprocess) parameter dicts."""
        _UpperCamelCase = {}
        if "threshold" in kwargs:
            _UpperCamelCase = kwargs["threshold"]
        if "top_k" in kwargs:
            _UpperCamelCase = kwargs["top_k"]
        return {}, {}, postprocess_params

    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> List[str]:
        """Preprocess: tokenize each candidate label with the image; yield one chunk per label."""
        _UpperCamelCase = load_image(inputs["image"])
        _UpperCamelCase = inputs["candidate_labels"]
        if isinstance(lowercase_ , lowercase_):
            _UpperCamelCase = candidate_labels.split(",")
        _UpperCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
        for i, candidate_label in enumerate(lowercase_):
            _UpperCamelCase = self.tokenizer(lowercase_ , return_tensors=self.framework)
            _UpperCamelCase = self.image_processor(lowercase_ , return_tensors=self.framework)
            yield {
                "is_last": i == len(lowercase_) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def __UpperCAmelCase ( self : Dict , lowercase_ : Tuple) -> str:
        """Forward one chunk through the model, carrying the bookkeeping keys along."""
        _UpperCamelCase = model_inputs.pop("target_size")
        _UpperCamelCase = model_inputs.pop("candidate_label")
        _UpperCamelCase = model_inputs.pop("is_last")
        _UpperCamelCase = self.model(**lowercase_)
        _UpperCamelCase = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def __UpperCAmelCase ( self : int , lowercase_ : Tuple , lowercase_ : List[str]=0.1 , lowercase_ : int=None) -> List[str]:
        """Postprocess: threshold detections, sort by score, optionally keep top_k."""
        _UpperCamelCase = []
        for model_output in model_outputs:
            _UpperCamelCase = model_output["candidate_label"]
            _UpperCamelCase = BaseModelOutput(lowercase_)
            _UpperCamelCase = self.image_processor.post_process_object_detection(
                outputs=lowercase_ , threshold=lowercase_ , target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                _UpperCamelCase = outputs["scores"][index].item()
                _UpperCamelCase = self._get_bounding_box(outputs["boxes"][index][0])
                _UpperCamelCase = {"score": score, "label": label, "box": box}
                results.append(lowercase_)
        # NOTE(review): the lambda's parameter is named ``lowercase_`` but its body
        # reads ``x`` — upstream this was ``lambda x: x["score"]``; as written it
        # raises NameError the first time the key function is called.
        _UpperCamelCase = sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)
        if top_k:
            _UpperCamelCase = results[:top_k]
        return results

    def __UpperCAmelCase ( self : str , lowercase_ : "torch.Tensor") -> Dict[str, int]:
        """Convert an (xmin, ymin, xmax, ymax) box tensor into a plain dict of ints."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = box.int().tolist()
        _UpperCamelCase = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
# (corrupted chunk-separator artifact removed)
from collections import deque
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : int , lowercase_ : str , lowercase_ : int , lowercase_ : int) -> None:
"""simple docstring"""
_UpperCamelCase = process_name # process name
_UpperCamelCase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
_UpperCamelCase = arrival_time
_UpperCamelCase = burst_time # remaining burst time
_UpperCamelCase = 0 # total time of the process wait in ready queue
_UpperCamelCase = 0 # time from arrival time to completion time
class _UpperCAmelCase :
    """Multi-level feedback queue (MLFQ) scheduler: round-robin with growing time
    slices on the upper queues, FCFS on the last queue.

    NOTE(review): mechanical renaming has damaged this class — ``__init__``
    repeats ``lowercase_`` for all four parameters (duplicate argument names are
    a SyntaxError) and binds values to throwaway locals instead of the ``self``
    attributes the methods read; several method bodies read their parameter by
    its original name (``queue``, ``process``, ``ready_queue``, ``time_slice``)
    which is never bound. Restore before use.
    """

    def __init__( self : List[Any] , lowercase_ : int , lowercase_ : list[int] , lowercase_ : deque[Process] , lowercase_ : int , ) -> None:
        """Record queue count, per-queue time slices, the initial ready queue, and the start time."""
        _UpperCamelCase = number_of_queues
        # time slice of queues that round robin algorithm applied
        _UpperCamelCase = time_slices
        # unfinished process is in this ready_queue
        _UpperCamelCase = queue
        # current time
        _UpperCamelCase = current_time
        # finished process is in this sequence queue
        _UpperCamelCase = deque()

    def __UpperCAmelCase ( self : Optional[int]) -> list[str]:
        """Return the names of finished processes in completion order."""
        _UpperCamelCase = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def __UpperCAmelCase ( self : Dict , lowercase_ : list[Process]) -> list[int]:
        """Return the waiting time of each process in the given list."""
        _UpperCamelCase = []
        for i in range(len(lowercase_)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def __UpperCAmelCase ( self : Dict , lowercase_ : list[Process]) -> list[int]:
        """Return the turnaround time of each process in the given list."""
        _UpperCamelCase = []
        for i in range(len(lowercase_)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def __UpperCAmelCase ( self : str , lowercase_ : list[Process]) -> list[int]:
        """Return the completion (stop) time of each process in the given list."""
        _UpperCamelCase = []
        for i in range(len(lowercase_)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def __UpperCAmelCase ( self : Any , lowercase_ : deque[Process]) -> list[int]:
        """Return the remaining burst time of each process in the queue."""
        return [q.burst_time for q in queue]

    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Process) -> int:
        """Add the time since the process was last stopped to its waiting time."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : deque[Process]) -> deque[Process]:
        """First-come-first-served: run every queued process to completion, in order."""
        _UpperCamelCase = deque()  # sequence deque of finished process
        while len(lowercase_) != 0:
            _UpperCamelCase = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            # NOTE(review): ``+=`` adds the arrival time to the clock instead of
            # jumping to it (``=``); latent here because the demo uses
            # arrival_time == 0 — confirm against a reference MLFQ implementation.
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(lowercase_)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            _UpperCamelCase = 0
            # set the process's turnaround time because it is finished
            _UpperCamelCase = self.current_time - cp.arrival_time
            # set the completion time
            _UpperCamelCase = self.current_time
            # add the process to queue that has finished queue
            finished.append(lowercase_)
        self.finish_queue.extend(lowercase_)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def __UpperCAmelCase ( self : List[Any] , lowercase_ : deque[Process] , lowercase_ : int) -> tuple[deque[Process], deque[Process]]:
        """Round-robin one cycle with the given time slice; unfinished processes requeue."""
        _UpperCamelCase = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(lowercase_)):
            _UpperCamelCase = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            # NOTE(review): same ``+=`` vs ``=`` concern as in the FCFS method above.
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(lowercase_)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                _UpperCamelCase = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(lowercase_)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                _UpperCamelCase = 0
                # set the finish time
                _UpperCamelCase = self.current_time
                # update the process' turnaround time because it is finished
                _UpperCamelCase = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(lowercase_)
        self.finish_queue.extend(lowercase_)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def __UpperCAmelCase ( self : Any) -> deque[Process]:
        """Run round-robin on all but the last queue, then FCFS on the remainder."""
        for i in range(self.number_of_queues - 1):
            _UpperCamelCase , _UpperCamelCase = self.round_robin(
                self.ready_queue , self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
    import doctest
    # NOTE(review): mangling bound every value below to the same name
    # `lowerCamelCase__`, while later lines reference `Pa`, `time_slices`,
    # `number_of_queues`, `queue`, and `mlfq`, none of which are defined —
    # as written this block raises NameError.  Upstream binds P1..P4,
    # number_of_queues, time_slices, queue, and mlfq respectively.
    lowerCamelCase__ = Process('''P1''', 0, 53)
    lowerCamelCase__ = Process('''P2''', 0, 17)
    lowerCamelCase__ = Process('''P3''', 0, 68)
    lowerCamelCase__ = Process('''P4''', 0, 24)
    lowerCamelCase__ = 3
    lowerCamelCase__ = [17, 25]
    lowerCamelCase__ = deque([Pa, Pa, Pa, Pa])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
    # fresh processes for the actual demo run below
    lowerCamelCase__ = Process('''P1''', 0, 53)
    lowerCamelCase__ = Process('''P2''', 0, 17)
    lowerCamelCase__ = Process('''P3''', 0, 68)
    lowerCamelCase__ = Process('''P4''', 0, 24)
    lowerCamelCase__ = 3
    lowerCamelCase__ = [17, 25]
    lowerCamelCase__ = deque([Pa, Pa, Pa, Pa])
    lowerCamelCase__ = MLFQ(number_of_queues, time_slices, queue, 0)
    lowerCamelCase__ = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        F"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        F"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        F"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
    )
    # print sequence of finished processes
    print(
        F"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
    )
| 82 | import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _UpperCAmelCase ( lowerCAmelCase ):
    """Static hygiene checks over the dataset scripts under ``./datasets``:
    every ``open(...)`` must pass an explicit encoding, and no stray
    ``print(...)`` statements may remain outside comments/strings.

    NOTE(review): the original defined all four methods under the single
    mangled name ``__UpperCAmelCase`` (later defs clobbered earlier ones)
    and discarded every local; names below are restored from the
    ``self._no_encoding_on_file_open`` / ``self._no_print_statements``
    references in the original body.  Test-method names follow the
    ``test_*`` convention — confirm against upstream.
    """

    def _no_encoding_on_file_open(self, filepath: str):
        """Return a regex match for an ``open()`` call lacking an
        ``encoding=`` argument (and not a binary mode) in *filepath*'s
        source text, or ``None`` if there is none."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Return the first genuine ``print(`` call in *filepath* — the
        alternation deliberately matches (and then discards) prints inside
        comments, strings, and docstrings — or ``None``."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            real_prints = [match for match in matches if match is not None and match.group(1) is not None]
        return real_prints[0] if real_prints else None

    def test_no_encoding_on_file_open(self):
        """Fail if any dataset script opens a file without an explicit encoding."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')

    def test_no_print_statements(self):
        """Fail if any dataset script still contains a print statement."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
| 82 | 1 |
import unittest
import numpy as np
def lowerCAmelCase__(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    """Schur complement of the 2x2 block matrix ``[[A, B], [B.T, C]]``.

    Args:
        mat_a: square block ``A`` with shape ``(n, n)``.
        mat_b: off-diagonal block ``B`` with shape ``(n, m)`` (same row
            count as ``A``).
        mat_c: block ``C`` whose column count matches ``B``'s.
        pseudo_inv: optional precomputed (pseudo-)inverse of ``A``; when
            omitted, ``np.linalg.inv(mat_a)`` is used.

    Returns:
        ``C - B.T @ A^{-1} @ B``.

    Raises:
        ValueError: if the block shapes are inconsistent, or if ``A`` is
            singular and no ``pseudo_inv`` was supplied.

    NOTE(review): the original signature repeated ``a__`` four times (a
    SyntaxError); parameter names are restored from the body's own
    references to ``mat_b``, ``mat_c``, and ``pseudo_inv``.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f'Instead found A of size {shape_a} and B of size {shape_b}'
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f'Instead found B of size {shape_b} and C of size {shape_c}'
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError as err:
            # chain the numerical error so the root cause stays visible
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.") from err

    return mat_c - mat_b.T @ a_inv @ mat_b
class _UpperCAmelCase ( unittest.TestCase ):
    """Unit tests for the Schur-complement helper (``lowerCAmelCase__``)
    defined above.

    NOTE(review): the original clobbered every local into
    ``_UpperCamelCase`` and referenced the undefined names
    ``schur_complement`` / ``lowercase_``; locals are restored from the
    surviving ``det_a * det_s`` reference and the helper's contract.
    """

    def test_schur_complement(self) -> None:
        """det([[A, B], [B.T, C]]) must equal det(A) * det(S)."""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = lowerCAmelCase__(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        """Passing blocks whose A/B row counts disagree must raise ValueError."""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            lowerCAmelCase__(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        """B and C with different column counts must raise ValueError."""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            lowerCAmelCase__(a, b, c)
if __name__ == "__main__":
    # run module doctests first, then the unittest suite defined above
    import doctest
    doctest.testmod()
    unittest.main()
| 82 | import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase :
    """Holds the state of one conversation for the conversational pipeline:
    the pending (unprocessed) user input, the history of processed user
    inputs, and the model's generated responses.

    NOTE(review): the original repeated the parameter name ``lowercase_``
    in ``__init__`` (a SyntaxError), discarded every ``self.*`` assignment
    into a throwaway local, called the nonexistent ``uuid.uuida()``, and
    mangled all method names to ``__UpperCAmelCase``.  Attribute and method
    names are restored from the pipeline class below, which calls
    ``mark_processed`` / ``append_response`` / ``iter_texts`` and reads
    ``new_user_input`` / ``uuid``.
    """

    def __init__(self, text=None, conversation_id=None, past_user_inputs=None, generated_responses=None) -> None:
        """Start a conversation, optionally seeded with a first user input."""
        if not conversation_id:
            conversation_id = uuid.uuid4()  # fresh id when none supplied
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text  # pending, not-yet-processed user input

    def __eq__(self, other) -> bool:
        """Conversations are equal when they share an id or identical content."""
        if not isinstance(other, _UpperCAmelCase):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text, overwrite=False) -> None:
        """Stage *text* as the new user input.

        If unprocessed input already exists, it is only replaced when
        ``overwrite`` is True; otherwise the new text is ignored (a warning
        is logged either way).
        """
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            self.new_user_input = text

    def mark_processed(self) -> None:
        """Move the pending user input into the processed history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response) -> None:
        """Record a model-generated response."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield ``(is_user, text)`` pairs over the whole conversation,
        interleaving past inputs with responses and ending with any pending
        user input."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self) -> str:
        """Human-readable transcript (one `user >>` / `bot >>` line per turn)."""
        output = f'Conversation id: {self.uuid} \n'
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f'{name} >> {text} \n'
        return output
@add_end_docstrings(
    lowerCAmelCase, R'''
    min_length_for_response (`int`, *optional*, defaults to 32):
        The minimum length (in number of tokens) for a response.
    minimum_tokens (`int`, *optional*, defaults to 10):
        The minimum length of tokens to leave for a response.
    ''', )
class _UpperCAmelCase ( lowerCAmelCase ):
    '''Conversational (multi-turn response generation) pipeline.

    NOTE(review): this copy is machine-mangled — several signatures repeat
    the parameter name `lowercase_` (a SyntaxError as written), and many
    assignments whose original targets had meaningful names were collapsed
    into the throwaway local `_UpperCamelCase`, leaving later references
    (`outputs`, `input_ids`, `forward_params`, ...) unbound.  Code is kept
    byte-identical; comments flag the clearly broken spots.
    '''
    def __init__( self : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str) -> List[str]:
        """Initialize the base pipeline and ensure a pad token exists."""
        super().__init__(*lowercase_ , **lowercase_)
        if self.tokenizer.pad_token_id is None:
            # NOTE(review): mangled — the assignment target should
            # presumably be `self.tokenizer.pad_token`; as written the
            # value is discarded.
            _UpperCamelCase = self.tokenizer.eos_token
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : str) -> Tuple:
        """Split kwargs into preprocess/forward/postprocess parameter dicts.

        NOTE(review): the three empty dicts below were
        preprocess_params / forward_params / postprocess_params (see the
        return statement); the keyed writes into them were mangled away, and
        the body references the original parameter names
        (min_length_for_response, minimum_tokens,
        clean_up_tokenization_spaces, generate_kwargs).
        """
        _UpperCamelCase = {}
        _UpperCamelCase = {}
        _UpperCamelCase = {}
        if min_length_for_response is not None:
            _UpperCamelCase = min_length_for_response
        if minimum_tokens is not None:
            _UpperCamelCase = minimum_tokens
        if "max_length" in generate_kwargs:
            _UpperCamelCase = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            _UpperCamelCase = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(lowercase_)
        return preprocess_params, forward_params, postprocess_params
    def __call__( self : Any , lowercase_ : Union[Conversation, List[Conversation]] , lowercase_ : str=0 , **lowercase_ : Union[str, Any]) -> Union[str, Any]:
        """Run the pipeline; unwrap a single-conversation result.

        NOTE(review): `outputs` is referenced but the call result was
        assigned to a mangled throwaway local.
        """
        _UpperCamelCase = super().__call__(lowercase_ , num_workers=lowercase_ , **lowercase_)
        if isinstance(lowercase_ , lowercase_) and len(lowercase_) == 1:
            return outputs[0]
        return outputs
    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Conversation , lowercase_ : Any=32) -> Dict[str, Any]:
        """Encode a Conversation into framework tensors (`input_ids`)."""
        if not isinstance(lowercase_ , lowercase_):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. '
                "Add user inputs with the conversation's `add_user_input` method")
        # prefer a tokenizer-provided conversation encoder when available
        if hasattr(self.tokenizer , "_build_conversation_input_ids"):
            _UpperCamelCase = self.tokenizer._build_conversation_input_ids(lowercase_)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            _UpperCamelCase = self._legacy_parse_and_tokenize(lowercase_)
        if self.framework == "pt":
            _UpperCamelCase = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            _UpperCamelCase = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[int]=10 , **lowercase_ : Dict) -> List[str]:
        """Generate a reply, trimming the prompt so that at least
        `minimum_tokens` of generation budget remains."""
        _UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
        _UpperCamelCase = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})')
            _UpperCamelCase = max_length - minimum_tokens
            _UpperCamelCase = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                _UpperCamelCase = model_inputs["attention_mask"][:, -trim:]
        _UpperCamelCase = model_inputs.pop("conversation")
        _UpperCamelCase = max_length
        _UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
        # encoder-decoder outputs start after the forced BOS token;
        # decoder-only outputs include the prompt, so skip its length
        if self.model.config.is_encoder_decoder:
            _UpperCamelCase = 1
        else:
            _UpperCamelCase = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int=True) -> List[Any]:
        """Decode generated ids and append the reply to the Conversation."""
        _UpperCamelCase = model_outputs["output_ids"]
        _UpperCamelCase = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
        _UpperCamelCase = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(lowercase_)
        return conversation
    def __UpperCAmelCase ( self : Any , lowercase_ : Conversation) -> Dict:
        """Fallback encoding: concatenate every turn, EOS-separated, then
        truncate to the tokenizer's maximum length (keeping the tail)."""
        _UpperCamelCase = self.tokenizer.eos_token_id
        _UpperCamelCase = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_))
        if len(lowercase_) > self.tokenizer.model_max_length:
            _UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 82 | 1 |
def lowerCAmelCase__(x: int, y: int) -> int:
    """Greatest common divisor of ``x`` and ``y`` via Euclid's algorithm.

    NOTE(review): the original repeated the parameter name ``a__`` (a
    SyntaxError) and recursed into the undefined name
    ``greatest_common_divisor``; the recursion is now self-referential.
    """
    return x if y == 0 else lowerCAmelCase__(y, x % y)
def lowerCAmelCase__(x: int, y: int) -> int:
    """Least common multiple of ``x`` and ``y``.

    NOTE(review): the original repeated the parameter name ``a__`` (a
    SyntaxError) and called the undefined ``greatest_common_divisor``;
    ``math.gcd`` is used instead, via a local import so this block stays
    self-contained.
    """
    from math import gcd

    return (x * y) // gcd(x, y)
def lowerCAmelCase__(n: int = 20) -> int:
    """Project Euler 5: smallest positive number evenly divisible by 1..n.

    NOTE(review): the original parameter was mangled to ``a__`` while the
    body read the undefined name ``n`` (and called the undefined ``lcm``);
    the parameter name is restored from the body and the running lcm is
    computed with ``math.gcd``.

    >>> lowerCAmelCase__(10)
    2520
    """
    import math

    g = 1
    for i in range(1, n + 1):
        g = g * i // math.gcd(g, i)  # lcm(g, i)
    return g
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this file — the function
    # above is (mangled to) `lowerCAmelCase__` — so this raises NameError.
    print(F"{solution() = }")
def lowerCAmelCase__(length: int = 50) -> int:
    """Project Euler 117: count the tilings of a row of ``length`` unit
    cells using black unit squares and oblong tiles of length 2, 3, and 4.

    NOTE(review): the original parameter was mangled to ``a__`` while the
    body read the undefined name ``length``; the name is restored from the
    body.  (The original line also carried stray ``| 82 |`` separator junk.)

    >>> lowerCAmelCase__(5)
    15
    """
    # ways_number[k] = tilings of a row of length k; every row has the
    # all-black tiling, hence the initial 1s (and the empty row has one).
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                # place one oblong tile at `tile_start`; the remainder of
                # the row contributes its already-computed count
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
if __name__ == "__main__":
    # NOTE(review): `solution` is undefined here (the function above is
    # named `lowerCAmelCase__`), so this entry point raises NameError.
    print(F"{solution() = }")
| 82 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both module-level bindings below were mangled to the same
# name `lowerCamelCase__` (upstream: a logger and the pretrained-config
# archive map), so the dict clobbers the logger; neither is referenced
# under this name later in the file.
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class _UpperCAmelCase ( lowerCAmelCase ):
    """Configuration class for a ViT (Vision Transformer) model; extra
    keyword arguments are forwarded to the base config class.

    NOTE(review): the original ``__init__`` repeated the parameter name
    ``lowercase_`` fifteen times (a SyntaxError) and discarded every value
    into a throwaway local; the parameter names below are restored from
    the right-hand sides of the original assignments, and each value is
    stored on ``self`` under the same name.
    """

    __A = '''vit'''

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class _UpperCAmelCase ( lowerCAmelCase ):
    """ONNX export configuration for the vision model above."""

    # version this export configuration was written against
    __A = version.parse('''1.11''')

    @property
    def __UpperCAmelCase ( self : Union[str, Any]) -> Mapping[str, Mapping[int, str]]:
        """Model inputs and the symbolic name of each of their axes."""
        axis_names = {0: "batch", 1: "num_channels", 2: "height", 3: "width"}
        return OrderedDict([("pixel_values", axis_names)])

    @property
    def __UpperCAmelCase ( self : Dict) -> float:
        """Numeric tolerance constant (presumably for output validation —
        the mangled property name hides its purpose)."""
        return 1e-4
| 82 | import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->int:
    '''Download class/regularization images (plus captions and URLs)
    matching a text prompt into a local directory via the LAION knn
    service.

    NOTE(review): mangled — the three parameters are all named ``a__`` (a
    SyntaxError as written); the body references the originals
    (class_prompt, class_data_dir, num_class_images), and most locals
    (factor, num_images, client, class_images, count, total, pbar, images,
    img) plus the three file handles (all ``fa``) were collapsed, so many
    names below are unbound.  Code preserved byte-for-byte.
    '''
    # over-request candidates so enough survive download failures
    _UpperCamelCase = 1.5
    _UpperCamelCase = int(factor * num_class_images )
    _UpperCamelCase = ClipClient(
        url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=a__ , aesthetic_weight=0.1 )
    os.makedirs(f'{class_data_dir}/images' , exist_ok=a__ )
    # enough images already on disk: nothing to do
    if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
        return
    # grow the candidate pool until the query yields enough hits
    while True:
        _UpperCamelCase = client.query(text=a__ )
        if len(a__ ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            _UpperCamelCase = int(factor * num_images )
            _UpperCamelCase = ClipClient(
                url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=a__ , aesthetic_weight=0.1 , )
    _UpperCamelCase = 0
    _UpperCamelCase = 0
    _UpperCamelCase = tqdm(desc="downloading real regularization images" , total=a__ )
    with open(f'{class_data_dir}/caption.txt' , "w" ) as fa, open(f'{class_data_dir}/urls.txt' , "w" ) as fa, open(
        f'{class_data_dir}/images.txt' , "w" ) as fa:
        while total < num_class_images:
            _UpperCamelCase = class_images[count]
            count += 1
            try:
                _UpperCamelCase = requests.get(images["url"] )
                if img.status_code == 200:
                    _UpperCamelCase = Image.open(BytesIO(img.content ) )
                    with open(f'{class_data_dir}/images/{total}.jpg' , "wb" ) as f:
                        f.write(img.content )
                    fa.write(images["caption"] + "\n" )
                    fa.write(images["url"] + "\n" )
                    fa.write(f'{class_data_dir}/images/{total}.jpg' + "\n" )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                # best-effort download loop: skip any image that fails
                continue
    return
def lowerCAmelCase__(args=None):
    """Parse command-line options for the class-image retrieval script.

    Args:
        args: optional argument list; defaults to ``sys.argv[1:]``.  This
            (backward-compatible) parameter exists so the parser can be
            exercised without touching the real command line.

    NOTE(review): the original passed the undefined name ``a__`` (the
    function has no parameters) for ``add_help`` / ``required`` / ``type``.
    The values below follow the help strings: two required string options
    and an integer count — confirm against the original caller.
    """
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args(args)
if __name__ == "__main__":
    # NOTE(review): `parse_args`, `args`, and `retrieve` are not defined
    # under these names in this file (both functions above were mangled to
    # `lowerCAmelCase__`, and the result below is bound to
    # `lowerCamelCase__`), so this entry point raises NameError as written.
    lowerCamelCase__ = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 82 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
# NOTE(review): mangling bound every constant below to the same name
# `lowerCamelCase__`; the tokenizer class that follows references
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, and SPIECE_UNDERLINE, which
# grounds the restored names.

# name of the serialized SentencePiece model inside a checkpoint
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

# download locations of the vocab file for each canonical T5 checkpoint
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
        '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
        '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
        '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
        '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''t5-small''': 512,
    '''t5-base''': 512,
    '''t5-large''': 512,
    '''t5-3b''': 512,
    '''t5-11b''': 512,
}

# SentencePiece's word-boundary marker character
SPIECE_UNDERLINE = '''▁'''
class _UpperCAmelCase ( lowerCAmelCase ):
    '''SentencePiece-based T5 tokenizer with <extra_id_N> sentinel support.

    NOTE(review): this copy is machine-mangled — several method signatures
    repeat the parameter name `lowercase_` (a SyntaxError as written), and
    many `self.<attr> = ...` assignments were rewritten into the throwaway
    local `_UpperCamelCase`, leaving later references unbound.  Code is
    preserved byte-for-byte; comments flag the clearly broken spots.
    '''
    # NOTE(review): four distinct class attributes (matching the module
    # constants referenced) were all mangled to `__A`; only the last
    # binding survives at class-creation time.
    __A = VOCAB_FILES_NAMES
    __A = PRETRAINED_VOCAB_FILES_MAP
    __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __A = ['''input_ids''', '''attention_mask''']
    def __init__( self : Tuple , lowercase_ : int , lowercase_ : str="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Dict="<pad>" , lowercase_ : Tuple=100 , lowercase_ : str=None , lowercase_ : Optional[Dict[str, Any]] = None , lowercase_ : str=True , **lowercase_ : Optional[Any] , ) -> None:
        """Load the SentencePiece model and register sentinel tokens.

        NOTE(review): parameters are mangled (duplicate `lowercase_`); the
        body references the originals: vocab_file, eos/unk/pad tokens,
        extra_ids, additional_special_tokens, sp_model_kwargs, legacy.
        """
        if extra_ids > 0 and additional_special_tokens is None:
            # default sentinels <extra_id_0> .. <extra_id_{extra_ids-1}>
            _UpperCamelCase = [f'<extra_id_{i}>' for i in range(lowercase_)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            _UpperCamelCase = len(set(filter(lambda lowercase_: bool("extra_id" in str(lowercase_)) , lowercase_)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")
        if legacy:
            logger.warning_once(
                f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
        _UpperCamelCase = legacy  # NOTE(review): mangled `self.legacy = ...`
        _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase_ , **lowercase_ , )
        _UpperCamelCase = vocab_file
        _UpperCamelCase = extra_ids
        _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(lowercase_)
    @staticmethod
    def __UpperCAmelCase ( lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : str) -> Any:
        """Resolve the effective max model length, warning about the
        deprecated per-checkpoint defaults (see the TODO on the module
        constant above)."""
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            _UpperCamelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f' {pretrained_model_name_or_path} automatically truncating your input to'
                    f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
                    f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value." , lowercase_ , )
        return max_model_length
    @property
    def __UpperCAmelCase ( self : Dict) -> Optional[int]:
        """Vocabulary size: SentencePiece pieces plus the extra sentinel ids."""
        return self.sp_model.get_piece_size() + self._extra_ids
    def __UpperCAmelCase ( self : Dict) -> Optional[int]:
        """Return the full token -> id mapping, including added tokens."""
        _UpperCamelCase = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __UpperCAmelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False) -> List[int]:
        """0/1 mask marking special tokens (the trailing EOS of each sequence)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(lowercase_)) + [1]
        return ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1]
    def __UpperCAmelCase ( self : str) -> Dict:
        """All additional special tokens matching <extra_id_N>."""
        return list(
            set(filter(lambda lowercase_: bool(re.search(R"<extra_id_\d+>" , lowercase_)) is not None , self.additional_special_tokens)))
    def __UpperCAmelCase ( self : List[Any]) -> Dict:
        """Ids of the sentinel tokens above."""
        return [self._convert_token_to_id(lowercase_) for token in self.get_sentinel_tokens()]
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[int]) -> List[int]:
        """Append the EOS id unless the sequence already ends with it."""
        if len(lowercase_) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def __UpperCAmelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Token-type ids for one or two sequences: all zeros."""
        _UpperCamelCase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a + eos) * [0]
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Build model input: each sequence terminated by EOS."""
        _UpperCamelCase = self._add_eos_if_not_present(lowercase_)
        if token_ids_a is None:
            return token_ids_a
        else:
            _UpperCamelCase = self._add_eos_if_not_present(lowercase_)
            return token_ids_a + token_ids_a
    def __getstate__( self : Tuple) -> Any:
        """Drop the unpicklable SentencePiece processor before pickling."""
        _UpperCamelCase = self.__dict__.copy()
        _UpperCamelCase = None
        return state
    def __setstate__( self : Optional[Any] , lowercase_ : Any) -> Optional[int]:
        """Restore pickled state and reload the SentencePiece model."""
        _UpperCamelCase = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs"):
            _UpperCamelCase = {}
        _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def __UpperCAmelCase ( self : int , lowercase_ : "TextInput" , **lowercase_ : Optional[int]) -> List[str]:
        """Tokenize text; non-legacy mode prefixes SPIECE_UNDERLINE first."""
        if not self.legacy:
            _UpperCamelCase = SPIECE_UNDERLINE + text.replace(lowercase_ , " ")
        return super().tokenize(lowercase_ , **lowercase_)
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : int , **lowercase_ : Optional[int]) -> List[str]:
        """Run SentencePiece; strip the artificial leading underline piece."""
        if not self.legacy:
            _UpperCamelCase = text.startswith(lowercase_)
            if is_first:
                _UpperCamelCase = text[1:]
        _UpperCamelCase = self.sp_model.encode(lowercase_ , out_type=lowercase_)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(lowercase_):
            _UpperCamelCase = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
        """Token -> id; sentinel tokens map to the top of the vocabulary."""
        if token.startswith("<extra_id_"):
            _UpperCamelCase = re.match(R"<extra_id_(\d+)>" , lowercase_)
            _UpperCamelCase = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(lowercase_)
    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> int:
        """Id -> token; ids beyond the SP vocab become <extra_id_N>."""
        if index < self.sp_model.get_piece_size():
            _UpperCamelCase = self.sp_model.IdToPiece(lowercase_)
        else:
            _UpperCamelCase = f'<extra_id_{self.vocab_size - 1 - index}>'
        return token
    def __UpperCAmelCase ( self : Dict , lowercase_ : Optional[int]) -> Optional[Any]:
        """Join tokens back into text, decoding around special tokens."""
        _UpperCamelCase = []
        _UpperCamelCase = ""
        _UpperCamelCase = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(lowercase_) + token
                _UpperCamelCase = True
                _UpperCamelCase = []
            else:
                current_sub_tokens.append(lowercase_)
                _UpperCamelCase = False
        out_string += self.sp_model.decode(lowercase_)
        return out_string.strip()
    def __UpperCAmelCase ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
        """Serialize the SentencePiece model into `save_directory`."""
        if not os.path.isdir(lowercase_):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        _UpperCamelCase = os.path.join(
            lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , lowercase_)
        elif not os.path.isfile(self.vocab_file):
            with open(lowercase_ , "wb") as fi:
                _UpperCamelCase = self.sp_model.serialized_model_proto()
                fi.write(lowercase_)
        return (out_vocab_file,)
| 82 | import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
# NOTE(review): mangling bound all three docstring constants below to the
# same name `lowerCamelCase__`, while the decorator and class that follow
# reference _DESCRIPTION, _KWARGS_DESCRIPTION, and _CITATION — which
# grounds the restored names.
_CITATION = '''\
@article{hendrycksmath2021,
    title={Measuring Mathematical Problem Solving With the MATH Dataset},
    author={Dan Hendrycks
    and Collin Burns
    and Saurav Kadavath
    and Akul Arora
    and Steven Basart
    and Eric Tang
    and Dawn Song
    and Jacob Steinhardt},
    journal={arXiv preprint arXiv:2103.03874},
    year={2021}
}
'''

_DESCRIPTION = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''

_KWARGS_DESCRIPTION = R'''
Calculates accuracy after canonicalizing inputs.
Args:
    predictions: list of predictions to score. Each prediction
    is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
    reference is a string that contains natural language
    and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
    (e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    """Accuracy metric for the MATH dataset: predictions and references are
    canonicalized and compared with ``math_equivalence.is_equiv``.

    NOTE(review): the original mangled both methods to the single name
    ``__UpperCAmelCase`` (the second clobbered the first) and repeated the
    parameter name ``lowercase_`` in the compute method (a SyntaxError).
    The `datasets.Metric` API looks up ``_info`` and ``_compute``, which
    grounds the restored method names.
    """

    def _info(self):
        """Metric metadata: string predictions and string references."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )

    def _compute(self, predictions, references):
        """Fraction of predictions judged equivalent to their reference."""
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 82 | 1 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Copy the weights of an old-structure (XLM)ProphetNet checkpoint into the
    current `transformers` model layout and save the converted model.

    Args:
        prophetnet_checkpoint_path: path of the old-structure checkpoint; if the
            path contains "xprophetnet" the XLM variant is converted.
        pytorch_dump_folder_path: output folder for the converted model.

    Raises:
        ValueError: when a missing key cannot be matched against the old model.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # Attention projections that are stored fused (in_proj_*) in the old model.
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new attribute name -> old attribute name ("" means: stay on the same module).
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                # Fall back to the new name when the mapped name does not exist.
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # Old checkpoints fuse q/k/v into a single in_proj_{weight,bias};
                # split it into thirds for the separate new projections.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # These two checks were previously bare tuple expressions (no-ops);
                # they are now real assertions.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # Descend one level on both sides and keep matching.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


# Backward-compatible alias for the previous (generated) name.
lowerCAmelCase__ = convert_prophetnet_checkpoint_to_pytorch


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 82 | import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
# Number of examples for the full-speed runs and for the smaller formatted runs.
SPEED_TEST_N_EXAMPLES = 5_0000
SMALL_TEST = 5000

# Results are written next to this script, under results/<script name>.json.
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset, length):
    """Read `length` examples one by one, by integer index."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    """Read the dataset in contiguous slices of `batch_size`."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    """Read `length` examples one by one under output format `type`."""
    # NOTE: `type` shadows the builtin, but it must keep this name because the
    # benchmark kwargs below are passed as {"type": ...}.
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    """Read `length` examples in slices of `batch_size` under format `type`."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    """Run every read benchmark before and after shuffling; dump timings as JSON."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            # Record the measured duration under a readable key.
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 82 | 1 |
def lowerCAmelCase__(number_of_steps: int) -> int:
    """Return the number of distinct ways to climb `number_of_steps` stairs
    taking one or two steps at a time (the Fibonacci recurrence).

    Args:
        number_of_steps: a positive integer number of stairs.

    Returns:
        The number of distinct climbs.

    Raises:
        ValueError: if `number_of_steps` is not a positive integer.
            (Previously an `assert`, which is stripped under `python -O`;
            the parameter and loop variables were also bound under mangled
            names and raised NameError.)
    """
    if not isinstance(number_of_steps, int) or number_of_steps <= 0:
        raise ValueError(f"number_of_steps needs to be positive integer, your input {number_of_steps}")
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        # Standard Fibonacci step: ways(n) = ways(n-1) + ways(n-2).
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
    # Run the module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
| 82 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase(unittest.TestCase):
    """Fast tests for `KarrasVePipeline` using a tiny randomly-initialized UNet.

    Fixes in this revision: the property was read as `self.dummy_uncond_unet`
    but defined under a mangled name, every local was assigned to a mangled
    name while being read back as `pipe`/`image`/..., and the test method was
    not named `test_*` so unittest never discovered it.
    """

    @property
    def dummy_uncond_unet(self):
        """A small deterministic (seeded) unconditional UNet."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        """Dict output and tuple output of the pipeline must agree on a known slice."""
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase(unittest.TestCase):
    """Slow integration test for `KarrasVePipeline` on a real pretrained model.

    Fixes in this revision: locals were assigned under a mangled name while
    being read as `pipe`/`image`/..., and the test method was not named
    `test_*` so unittest never discovered it.
    """

    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82 | 1 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """A distribution transformed by ``y = loc + scale * x``.

    Fixes in this revision: the four constructor parameters shared one name
    (a SyntaxError), the base class name was undefined (the imported
    `TransformedDistribution` is the real base), and the three property names
    were mangled so `mean`/`variance`/`stddev` never existed (this class is
    referenced by name further down the file).
    """

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        # Defaults give the identity transform.
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance scales with the square of `scale`."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation, derived from `variance`."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Project a feature vector onto one tensor per distribution argument.

    Fixes in this revision: the constructor's parameters shared one name
    (a SyntaxError), the attributes were assigned under a mangled name while
    being read as ``self.proj``/``self.domain_map``, and the forward method was
    not named ``forward`` so ``nn.Module.__call__`` could not dispatch to it.
    (This class is referenced by name further down the file.)

    Args:
        in_features: size of the incoming feature vector.
        args_dim: mapping from argument name to its output dimension; one
            ``nn.Linear(in_features, dim)`` is created per entry.
        domain_map: callable that maps the unbounded projections into the
            domain of each distribution argument.
    """

    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        # One unbounded projection per distribution argument, then constrain.
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable so it can live inside an ``nn.Module`` graph.

    Fixes in this revision: the wrapped callable was assigned under a mangled
    name while being read as ``self.function``, and the forward method was not
    named ``forward`` so ``nn.Module.__call__`` could not dispatch to it.
    (This class is referenced by name further down the file.)
    """

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        # Delegate straight to the wrapped callable.
        return self.function(x, *args)
class DistributionOutput:
    """Base class converting raw network outputs into a ``torch.distributions.Distribution``.

    Subclasses set ``args_dim`` and ``distribution_class`` and implement
    ``domain_map``. Fixes in this revision: every method previously carried the
    same mangled name (each definition silently overwrote the previous one),
    ``distribution`` had duplicated parameter names (a SyntaxError), and locals
    and attributes were bound under mangled names while being read back as
    ``self.dim``/``self.args_dim``/etc.
    """

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        # Each argument needs `dim` copies when modeling a multivariate output.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            # Treat the last `dim` axis as a single event.
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        """Build the output distribution, optionally rescaled by `loc`/`scale`."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event described by the distribution."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. ``len(event_shape)``."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value inside the distribution's support (0.0 by default)."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping features to distribution arguments."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Map unconstrained projections into each argument's domain; subclass hook."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Smooth positivity mapping: ``(x + sqrt(x**2 + 4)) / 2``."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """Student-T output head: arguments are degrees of freedom, location, scale.

    Fixes in this revision: the classmethod's three parameters shared one name
    (a SyntaxError), the two class attributes shared one mangled name (the
    second silently overwrote the first), and the base-class name was undefined.
    """

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        # scale must be strictly positive; the 2.0 offset keeps df above 2
        # (presumably so the variance stays finite — TODO confirm).
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """Normal (Gaussian) output head: arguments are location and scale.

    Fixes in this revision: the classmethod's two parameters shared one name
    (a SyntaxError), the two class attributes shared one mangled name (the
    second silently overwrote the first), and the base-class name was undefined.
    """

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        # scale must be strictly positive.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """Negative-binomial output head: arguments are total_count and logits.

    Fixes in this revision: duplicated parameter names in ``domain_map``,
    ``_base_distribution`` and ``distribution`` (SyntaxErrors), class attributes
    collapsed onto one mangled name, and an undefined base-class name.
    """

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        # total_count must be positive; logits are unconstrained.
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        """Scale is folded into the logits rather than applied as an affine map."""
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
| 82 | import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__A = MODEL_FOR_MASKED_LM_MAPPING
__A = TF_MODEL_FOR_MASKED_LM_MAPPING
def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 25506, "token_str": " accuser"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-0_5,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-0_5,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
] , )
_UpperCamelCase = unmasker("My name is <mask> <mask>" , top_k=2)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
# convert model to fp16
pipe.model.half()
_UpperCamelCase = pipe("Paris is the [MASK] of France.")
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(lowercase_ , lowercase_)
@slow
@require_torch
def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
self.run_large_test(lowercase_)
@slow
@require_tf
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
self.run_large_test(lowercase_)
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int) -> Any:
"""simple docstring"""
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
@require_tf
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int]) -> int:
"""simple docstring"""
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> str:
"""simple docstring"""
_UpperCamelCase = fill_masker.tokenizer
_UpperCamelCase = fill_masker.model
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}'])
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
with self.assertRaises(lowercase_):
fill_masker([None])
# No mask_token is not supported
with self.assertRaises(lowercase_):
fill_masker("This is")
self.run_test_top_k(lowercase_ , lowercase_)
self.run_test_targets(lowercase_ , lowercase_)
self.run_test_top_k_targets(lowercase_ , lowercase_)
self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_)
self.fill_mask_with_multiple_masks(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : int , lowercase_ : Dict , lowercase_ : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = sorted(vocab.keys())[:2]
# Pipeline argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , targets=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Call argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Score equivalence
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCamelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_) == set(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
# Raises with invalid
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[])
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""])
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets="")
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , top_k=2)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
# top_k=2, ntargets=3
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=lowercase_)
# If we use the most probably targets, and filter differently, we should still
# have the same results
_UpperCamelCase = [el["token_str"] for el in sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_).issubset(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=lowercase_)
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCamelCase = fill_masker(f'My name is {tokenizer.mask_token}' , targets=lowercase_ , top_k=10)
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(lowercase_) , 3)
def __UpperCAmelCase(self, model: List[str], tokenizer: Any) -> Dict:
    """A prompt with several mask tokens yields one `top_k` list per mask.

    The `ANY(...)` type arguments were obfuscated to an unbound name; they are
    restored to the concrete field types of a fill-mask result dict.
    """
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    outputs = fill_masker(
        f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}', top_k=2)
    self.assertEqual(
        outputs,
        [
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        ],
    )
| 82 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Public API of the Electra package, loaded lazily so that importing it does
# not pull in torch/tensorflow/flax unless they are actually installed.
# The original code bound every piece of the structure to one repeatedly
# clobbered name and then passed an undefined `_import_structure` to
# `_LazyModule`, which raised NameError on import.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy so attribute access triggers the real import.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 82 | lowerCamelCase__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
_UpperCamelCase = Stack()
_UpperCamelCase = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(a__ ) )
elif i in operators:
# RULE 2
operator_stack.push(a__ )
elif i == ")":
# RULE 4
_UpperCamelCase = operator_stack.peek()
operator_stack.pop()
_UpperCamelCase = operand_stack.peek()
operand_stack.pop()
_UpperCamelCase = operand_stack.peek()
operand_stack.pop()
_UpperCamelCase = operators[opr](a__ , a__ )
operand_stack.push(a__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
    # The evaluator above is named `lowerCAmelCase__`; the original call
    # referenced an undefined `dijkstras_two_stack_algorithm` and the
    # equation string was bound to a different throwaway name.
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {lowerCAmelCase__(equation)}")
| 82 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger; the original bound it to the same name as the dict below,
# which immediately clobbered it.
logger = logging.get_logger(__name__)

# Canonical DPR checkpoints mapped to their hosted configuration files.
lowerCamelCase__ = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}
class _UpperCAmelCase(PretrainedConfig):
    """DPR encoder/reader configuration.

    Mirrors the BERT hyper-parameters and adds ``projection_dim``: the size
    of an optional projection applied on top of the pooled output (0 disables
    it).  The base class was the undefined name ``lowerCAmelCase``; DPR
    configurations derive from ``PretrainedConfig``, imported above.  The
    original ``__init__`` declared fifteen parameters with one duplicated
    name (a SyntaxError) and never assigned anything to ``self``.
    """

    # NOTE(review): upstream DPRConfig sets `model_type = "dpr"` here; the
    # attribute name looks machine-renamed — confirm before relying on it.
    __A = "dpr"

    def __init__(
        self,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        pad_token_id: int = 0,
        position_embedding_type: str = "absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 82 | import logging
from transformers import PretrainedConfig
# Module logger; originally clobbered by the dict assignment that followed it.
logger = logging.getLogger(__name__)

# Hosted configuration file for the reference BertAbs checkpoint.
lowerCamelCase__ = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class _UpperCAmelCase(PretrainedConfig):
    """BertAbs (abstractive summarization) configuration.

    ``enc_*``/``dec_*`` describe the transformer encoder and decoder stacks.
    The base class was the undefined name ``lowerCAmelCase``; it must be
    ``PretrainedConfig`` for serialization/``from_pretrained`` to work.  The
    original ``__init__`` duplicated a parameter name (a SyntaxError) and
    never stored anything on ``self``.
    """

    # NOTE(review): upstream sets `model_type = "bertabs"` here; the attribute
    # name looks machine-renamed — confirm before relying on it.
    __A = "bertabs"

    def __init__(
        self,
        vocab_size: int = 30522,
        max_pos: int = 512,
        enc_layers: int = 6,
        enc_hidden_size: int = 512,
        enc_heads: int = 8,
        enc_ff_size: int = 512,
        enc_dropout: float = 0.2,
        dec_layers: int = 6,
        dec_hidden_size: int = 768,
        dec_heads: int = 8,
        dec_ff_size: int = 2048,
        dec_dropout: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def lowerCAmelCase__(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    """Merge LoRA weights from a `.safetensors` checkpoint into a Stable
    Diffusion pipeline and return the patched pipeline.

    The original signature declared five parameters all named ``a__`` (a
    SyntaxError) and the body read many names (``visited``, ``layer_infos``,
    ``curr_layer``, ``pair_keys``) that were never bound; the restored names
    follow the reads in the body.
    """
    # load the base pipeline
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer: greedily join underscore-separated pieces
        # until an attribute of that name exists on the current module.
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        # pair up the lora_up / lora_down matrices, up first
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight: W += alpha * (up @ down), conv kernels need the
        # trailing 1x1 spatial dims squeezed away and restored
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list so the partner key is skipped next time round
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    # `parser`, `convert` and `pipe` were all undefined in the original
    # (every assignment targeted one throwaway module-level name).
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    pipe = lowerCAmelCase__(
        args.base_model_path,
        args.checkpoint_path,
        args.lora_prefix_unet,
        args.lora_prefix_text_encoder,
        args.alpha,
    )
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 82 | from datetime import datetime
import requests
from bs4 import BeautifulSoup  # the original `from bsa import ...` is a typo for bs4

if __name__ == "__main__":
    # Download the og:image preview picture of any web page.
    # All locals below were previously bound to one clobbered module name,
    # so `url`, `soup`, `image_url` etc. were undefined when read.
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 82 | 1 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
    "compression_format, is_archive", [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bza_file,
    gz_file,
    lza_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    """Each concrete extractor must detect and extract its own format.

    The original declared twelve parameters all named ``a__`` (a SyntaxError)
    and was not prefixed ``test_`` so pytest never collected it; the restored
    fixture names are exactly the names read in the body below.
    """
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bza_file, BzipaExtractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lza_file, LzaExtractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        # Fixture is None when the optional compression backend is missing.
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
    "compression_format, is_archive", [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bza_file,
    gz_file,
    lza_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    """The generic `Extractor` facade must infer the format and extract.

    Restored from an obfuscated signature (all parameters named ``a__``);
    the fixture names match the names read in the body.
    """
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bza_file,
        "gzip": gz_file,
        "lz4": lza_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    # NOTE(review): argument order (input, output, format) inferred from the
    # locals computed above — confirm against datasets' Extractor.extract.
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    """A tar archive whose member path escapes the archive root via `..`.

    The fixture must carry this name: the insecure-tar test below requests
    it as the parameter `tar_file_with_dot_dot`.
    """
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    """A tar archive containing a symlink that points outside the archive.

    Named after the parameter the insecure-tar test below requests.  The
    original passed the fixture's own parameter as `target_is_directory`,
    which must be the literal True (".." is a directory).
    """
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
    "insecure_tar_file, error_log", [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    """Insecure tar members must be refused and logged, not extracted.

    Restored from an obfuscated signature (all six parameters named ``a__``);
    the fixture names match the dict keys and reads in the body.
    """
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    """`zipfile.is_zipfile` misclassifies this PNG; `ZipExtractor` must not.

    Renamed with a ``test_`` prefix so pytest collects it (the original
    placeholder name was never collected), and the throwaway locals were
    given the names the body reads.
    """
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
| 82 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger; the original bound it to the same name as the dict below,
# which immediately clobbered it.
logger = logging.get_logger(__name__)

# Canonical DPR checkpoints mapped to their hosted configuration files.
lowerCamelCase__ = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}
class _UpperCAmelCase(PretrainedConfig):
    """DPR encoder/reader configuration (duplicate chunk of the one above).

    The base class was the undefined name ``lowerCAmelCase``; DPR
    configurations derive from ``PretrainedConfig``.  The original
    ``__init__`` duplicated parameter names (a SyntaxError) and never
    assigned anything to ``self``.
    """

    # NOTE(review): upstream DPRConfig sets `model_type = "dpr"` here; the
    # attribute name looks machine-renamed — confirm before relying on it.
    __A = "dpr"

    def __init__(
        self,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        pad_token_id: int = 0,
        position_embedding_type: str = "absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 82 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of one Karras-VE scheduler step.

    The class must be named ``KarrasVeOutput``: the scheduler below
    constructs it by that name with exactly these keyword arguments.  The
    fields were obfuscated to three rebindings of ``__A = 42``.
    """

    # x_{t-1}: sample to feed into the next model call
    prev_sample: torch.FloatTensor
    # d x / d sigma at the current noise level; reused by step_correct
    derivative: torch.FloatTensor
    # the model's estimate of the fully denoised sample, when available
    pred_original_sample: Optional[torch.FloatTensor] = None
class _UpperCAmelCase(SchedulerMixin, ConfigMixin):
    """Stochastic sampling scheduler after Karras et al. (2022), tailored to
    variance-expanding models: `add_noise_to_input` implements the stochastic
    "churn" step and `step`/`step_correct` the Heun predictor/corrector pair.

    The original bases were the undefined name ``lowerCAmelCase`` (must be
    ``SchedulerMixin, ConfigMixin``, imported above), every method shared one
    placeholder name (so only the last survived) and several signatures
    duplicated parameter names (a SyntaxError).  Parameter/method names are
    restored from the reads in the bodies (``self.config.s_min`` etc.);
    method names follow the diffusers scheduler API — confirm against it.
    """

    # solver order: Heun's method is second order
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No-op; present for interchangeability with other schedulers."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Precompute the reversed timestep indices and the sigma schedule."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        # geometric interpolation from sigma_max down to sigma_min
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        """Explicit Langevin-like "churn": raise the noise level from sigma
        to sigma_hat = sigma + gamma * sigma and add matching noise."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """First-order (Euler) prediction from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """Second-order (Heun) correction using the averaged derivative."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def add_noise(self, original_samples, noise, timesteps):
        """Training-time noising is not defined for this scheduler."""
        raise NotImplementedError()
| 82 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazily-loaded public API of the Table Transformer package.  The original
# bound the structure to a clobbered module name and then passed an
# undefined `_import_structure` to `_LazyModule` (NameError on import).
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy so attribute access triggers the real import.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 82 | 1 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _UpperCAmelCase(unittest.TestCase):
    """Slow integration test pinning the loss of flax google/mt5-small."""

    @slow
    def __UpperCAmelCase(self) -> Optional[Any]:
        """Reproduce the reference cross-entropy score on a tiny input.

        The original body bound every value to one throwaway local and then
        read `model`, `labels`, `logits`, `loss` etc., which were unbound;
        the restored names follow those reads.
        """
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        expected_score = -84.9127
        self.assertTrue(abs(mtf_score - expected_score) < 1e-4)
| 82 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of output labels for each GLUE task, used to size the classification
# head during checkpoint conversion.  The converter below reads
# `GLUE_TASKS_NUM_LABELS`, but the dict was bound to a throwaway name, so the
# lookup raised NameError.
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()
def lowerCAmelCase__(tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None):
    """Convert a TensorFlow XLNet checkpoint to a PyTorch model directory.

    Picks the model head from `finetuning_task` (GLUE -> sequence
    classification, "squad" -> question answering, otherwise LM head), loads
    the TF weights, and writes `WEIGHTS_NAME`/`CONFIG_NAME` into the dump
    folder.  The original signature duplicated the parameter name ``a__``
    (a SyntaxError) and the body read unbound names (`config`, `model`, the
    dump paths); restored names follow the reads.
    """
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}')
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'Save configuration file to {os.path.abspath(pytorch_config_dump_path)}')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)

    # The converter above is named `lowerCAmelCase__`; the original call
    # referenced the undefined `convert_xlnet_checkpoint_to_pytorch`.
    lowerCAmelCase__(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 82 | 1 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
# WGS-84 ellipsoid: equatorial (semi-major) and polar (semi-minor) radii and
# the equatorial radius in metres.  The distance function below reads these
# three names, but all three literals were bound to one clobbered name.
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137
def lowerCAmelCase__(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Lambert's ellipsoidal-distance approximation between two lat/lon
    points, in metres, using the haversine central angle as input.

    The original signature declared four parameters all named ``a__`` (a
    SyntaxError) and read intermediate names that were never bound; the
    restored names follow the reads (`b_lat*`, `sigma`, `x_value`, ...).
    """
    # Flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_demonimator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
    # Run the embedded doctests when the module is executed as a script.
    import doctest
    doctest.testmod()
| 82 | import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """Minimal Lightning module matching the fine-tuning checkpoint layout:
    a Longformer backbone plus a two-unit span-prediction head.

    Renamed from a machine-generated placeholder: the converter below
    instantiates ``LightningModel`` and reads ``.model`` / ``.qa_outputs``,
    none of which the original ``__init__`` ever assigned (it bound every
    value to one throwaway local and then read ``self.model``).
    """

    def __init__(self, model: Tuple):
        super().__init__()
        self.model = model
        self.num_labels = 2  # start and end logits
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def __UpperCAmelCase(self) -> Any:
        """Intentionally empty: only the weights are transferred; this
        wrapper is never trained or run forward."""
        pass
def lowerCAmelCase__ ( longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path ) ->str:
    '''
    Convert a PyTorch Lightning Longformer-QA checkpoint into a standalone
    `LongformerForQuestionAnswering` model saved with `save_pretrained`.

    longformer_model: model id of the backbone (e.g. `longformer-base-4096`).
    longformer_question_answering_ckpt_path: path to the Lightning `.ckpt` file.
    pytorch_dump_folder_path: output directory for the converted model.
    '''
    # Rebuild the Lightning wrapper around the backbone and restore its weights
    # on CPU so no GPU is required for conversion.
    model = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(model )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device("cpu" ) )
    lightning_model.load_state_dict(ckpt["state_dict"] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
    # CLI entry point: convert a Lightning Longformer-QA checkpoint to a
    # standalone Hugging Face model directory.
    lowerCamelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--longformer_model''',
        default=None,
        type=str,
        required=True,
        help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
    )
    parser.add_argument(
        '''--longformer_question_answering_ckpt_path''',
        default=None,
        type=str,
        required=True,
        help='''Path the official PyTorch Lightning Checkpoint.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    # NOTE(review): `parser`/`args` are read below but the assignments target the
    # mangled name `lowerCamelCase__`; likewise the callee name differs from the
    # (mangled) converter definition above — verify the intended identifiers.
    lowerCamelCase__ = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 82 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import scaffolding (standard Hugging Face pattern): only the module
# structure is declared here; heavy submodules are imported on first access.
lowerCamelCase__ = {
    '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
# PyTorch model symbols are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ResNetForImageClassification''',
        '''ResNetModel''',
        '''ResNetPreTrainedModel''',
        '''ResNetBackbone''',
    ]
# TensorFlow model symbols are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFResNetForImageClassification''',
        '''TFResNetModel''',
        '''TFResNetPreTrainedModel''',
    ]
# Flax model symbols are only registered when JAX/Flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        '''FlaxResNetForImageClassification''',
        '''FlaxResNetModel''',
        '''FlaxResNetPreTrainedModel''',
    ]
# Under static type checking, import everything eagerly so analyzers see it.
if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
    # At runtime, replace this module with a lazy proxy module.
    import sys
    lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 82 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase ):
    '''
    Deprecated alias of the LayoutLMv2 image processor, kept so old
    `LayoutLMv2FeatureExtractor` imports keep working until Transformers v5.
    '''

    def __init__( self : str , *args , **kwargs) -> None:
        """Emit a deprecation warning, then initialise the underlying image processor."""
        # The mangled source reused one name for *args/**kwargs (a SyntaxError)
        # and passed an unbound name as the warning category; FutureWarning is
        # the standard category for this deprecation pattern.
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs)
| 82 | 1 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
    '''Fast CPU checks for the ScoreSdeVe pipeline using a tiny UNet.'''

    @property
    def __UpperCAmelCase ( self : str) -> List[str]:
        """Build a small, seed-fixed UNet2DModel suitable for fast tests."""
        torch.manual_seed(0)
        _UpperCamelCase = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model

    def __UpperCAmelCase ( self : str) -> Tuple:
        """Dict-return and tuple-return outputs should match the recorded slice."""
        # NOTE(review): the locals read below (`model`, `sde_ve`, `image`,
        # `image_from_tuple`, `expected_slice`, ...) are never bound — every
        # assignment targets the mangled `_UpperCamelCase`; verify intended names.
        _UpperCamelCase = self.dummy_uncond_unet
        _UpperCamelCase = ScoreSdeVeScheduler()
        _UpperCamelCase = ScoreSdeVePipeline(unet=lowercase_ , scheduler=lowercase_)
        sde_ve.to(lowercase_)
        sde_ve.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=lowercase_).images
        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=lowercase_ , return_dict=lowercase_)[
            0
        ]
        # Compare only the bottom-right 3x3 slice of the last channel.
        _UpperCamelCase = image[0, -3:, -3:, -1]
        _UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    '''Slow integration test against the pretrained ncsnpp-church-256 checkpoint.'''

    def __UpperCAmelCase ( self : Tuple) -> List[Any]:
        """A 256x256 sample from the pretrained pipeline should match the recorded slice."""
        # NOTE(review): assignments below target the mangled `_UpperCamelCase`
        # while `sde_ve`, `image`, `image_slice`, `expected_slice` are read —
        # verify the intended local names.
        _UpperCamelCase = "google/ncsnpp-church-256"
        _UpperCamelCase = UNetaDModel.from_pretrained(lowercase_)
        _UpperCamelCase = ScoreSdeVeScheduler.from_pretrained(lowercase_)
        _UpperCamelCase = ScoreSdeVePipeline(unet=lowercase_ , scheduler=lowercase_)
        sde_ve.to(lowercase_)
        sde_ve.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=lowercase_).images
        _UpperCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        _UpperCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82 | import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( enum.Enum ):
'''simple docstring'''
__A = 0
__A = 1
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    '''
    Base pipeline for text-to-text generation with seq2seq models:
    tokenize -> generate -> decode. Subclasses override the class attribute
    below (upstream `return_name`) to label their output fields.
    '''

    # Prefix for output keys, e.g. `generated_text` / `generated_token_ids`.
    __A = '''generated'''

    def __init__( self : Any , *lowercase_ : Dict , **lowercase_ : Tuple) -> List[Any]:
        """Initialise the base pipeline and restrict to seq2seq LM classes."""
        super().__init__(*lowercase_ , **lowercase_)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)

    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=None , lowercase_ : Any=None , lowercase_ : Union[str, Any]=None , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
        """
        Split call kwargs into (preprocess, forward, postprocess) parameter dicts.
        NOTE(review): assignments below target the mangled `_UpperCamelCase`
        while `truncation`, `return_tensors`, `return_type`, `stop_sequence`,
        `stop_sequence_ids` and the three returned dicts are read — the
        intended bindings need verification.
        """
        _UpperCamelCase = {}
        if truncation is not None:
            _UpperCamelCase = truncation
        _UpperCamelCase = generate_kwargs
        _UpperCamelCase = {}
        if return_tensors is not None and return_type is None:
            _UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            _UpperCamelCase = return_type
        if clean_up_tokenization_spaces is not None:
            _UpperCamelCase = clean_up_tokenization_spaces
        if stop_sequence is not None:
            _UpperCamelCase = self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
            if len(lowercase_) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            # Only the first token of a multi-token stop sequence is used.
            _UpperCamelCase = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params

    def __UpperCAmelCase ( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> Any:
        """Length-sanity hook; subclasses override to emit warnings. Always True here."""
        return True

    def __UpperCAmelCase ( self : Dict , *lowercase_ : List[str] , lowercase_ : List[Any]) -> Tuple:
        """Prepend the model's prefix (if any) and tokenize single or batched input."""
        _UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0] , lowercase_):
            # Batched input requires a pad token for padding to a rectangle.
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            _UpperCamelCase = ([prefix + arg for arg in args[0]],)
            _UpperCamelCase = True
        elif isinstance(args[0] , lowercase_):
            _UpperCamelCase = (prefix + args[0],)
            _UpperCamelCase = False
        else:
            raise ValueError(
                f' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`')
        _UpperCamelCase = self.tokenizer(*lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__( self : List[Any] , *lowercase_ : Any , **lowercase_ : int) -> Dict:
        """Run the pipeline; unwrap single-result lists for 1-element batch inputs."""
        _UpperCamelCase = super().__call__(*lowercase_ , **lowercase_)
        if (
            isinstance(args[0] , lowercase_)
            and all(isinstance(lowercase_ , lowercase_) for el in args[0])
            and all(len(lowercase_) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str=TruncationStrategy.DO_NOT_TRUNCATE , **lowercase_ : Dict) -> Optional[int]:
        """Preprocess: delegate to the tokenization helper above."""
        _UpperCamelCase = self._parse_and_tokenize(lowercase_ , truncation=lowercase_ , **lowercase_)
        return inputs

    def __UpperCAmelCase ( self : str , lowercase_ : str , **lowercase_ : str) -> str:
        """Forward pass: validate lengths, generate, and reshape ids to (batch, candidates, ...)."""
        if self.framework == "pt":
            _UpperCamelCase , _UpperCamelCase = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            _UpperCamelCase , _UpperCamelCase = tf.shape(model_inputs["input_ids"]).numpy()
        _UpperCamelCase = generate_kwargs.get("min_length" , self.model.config.min_length)
        _UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
        self.check_inputs(lowercase_ , generate_kwargs["min_length"] , generate_kwargs["max_length"])
        _UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
        _UpperCamelCase = output_ids.shape[0]
        # Group generated candidates per input example.
        if self.framework == "pt":
            _UpperCamelCase = output_ids.reshape(lowercase_ , out_b // in_b , *output_ids.shape[1:])
        elif self.framework == "tf":
            _UpperCamelCase = tf.reshape(lowercase_ , (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : int=ReturnType.TEXT , lowercase_ : int=False) -> Tuple:
        """Postprocess: build one record per candidate, as raw ids or decoded text."""
        _UpperCamelCase = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                _UpperCamelCase = {f'{self.return_name}_token_ids': output_ids}
            elif return_type == ReturnType.TEXT:
                _UpperCamelCase = {
                    f'{self.return_name}_text': self.tokenizer.decode(
                        lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
                }
            records.append(lowercase_)
        return records
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    '''
    Summarization pipeline: text2text generation whose results are returned
    under `summary_text` / `summary_token_ids`.
    '''

    # Prefix used for output keys in result records.
    __A = '''summary'''

    def __call__( self : Optional[Any] , *args , **kwargs) -> Optional[int]:
        """Delegate to the base text2text `__call__`."""
        return super().__call__(*args , **kwargs)

    def __UpperCAmelCase ( self : List[str] , input_length : int , min_length : int , max_length : int) -> bool:
        """
        Warn about min/max length settings that look misconfigured for
        summarization (the mangled signature reused one parameter name, a
        SyntaxError, while the body reads these three names).
        """
        if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be inferior than your max_length={max_length}.')
        if input_length < max_length:
            logger.warning(
                f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})')
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    '''
    Translation pipeline: text2text generation with source/target language
    handling; results are returned under `translation_text` / `translation_token_ids`.

    NOTE(review): the three mangled method names below collide with each other;
    upstream they are `check_inputs`, `_parse_and_tokenize` and
    `_sanitize_parameters` — verify before renaming.
    '''

    # Prefix used for output keys in result records.
    __A = '''translation'''

    def __UpperCAmelCase ( self : Dict , input_length : int , min_length : int , max_length : int) -> int:
        """Warn when the input already uses most of `max_length`."""
        if input_length > 0.9 * max_length:
            logger.warning(
                f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
                "increasing your max_length manually, e.g. translator('...', max_length=400)")
        return True

    def __UpperCAmelCase ( self : Tuple , *args , truncation=TruncationStrategy.DO_NOT_TRUNCATE , src_lang=None , tgt_lang=None) -> List[str]:
        """Tokenize, preferring the tokenizer's dedicated translation builder when present."""
        if getattr(self.tokenizer , "_build_translation_inputs" , None):
            return self.tokenizer._build_translation_inputs(
                *args , return_tensors=self.framework , truncation=truncation , src_lang=src_lang , tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args , truncation=truncation)

    def __UpperCAmelCase ( self : List[str] , src_lang=None , tgt_lang=None , **kwargs) -> List[Any]:
        """Resolve src/tgt languages, falling back to parsing a `translation_xx_to_yy` task name."""
        preprocess_params , forward_params , postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task" , self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__( self : List[str] , *args , **kwargs) -> Union[str, Any]:
        """Delegate to the base text2text `__call__`."""
        return super().__call__(*args , **kwargs)
| 82 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( model_name ) ->str:
    '''
    Build the `DetrConfig` for a DETR checkpoint name and report whether it is
    a panoptic-segmentation checkpoint.

    Returns `(config, is_panoptic)`.
    '''
    # Select the ResNet backbone variant named in the checkpoint.
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50" )
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101" )
    else:
        raise ValueError("Model name should include either resnet50 or resnet101" )
    # Use the HF ResNet implementation (not timm) as the backbone.
    config = DetrConfig(use_timm_backbone=False , backbone_config=backbone_config )
    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    # Load the class-index -> name mapping from the shared label-files dataset.
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def lowerCAmelCase__ ( a__ ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
f'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
f'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def lowerCAmelCase__ ( state_dict , old , new ) ->None:
    '''
    Rename `old` to `new` in `state_dict`, in place: pop the value stored under
    `old` and re-insert it under `new`. The mangled source discarded the popped
    value into a rebound local, so the renamed entry was never written back —
    yet the conversion routine below relies on the mutation.
    '''
    val = state_dict.pop(old )
    state_dict[new] = val
def lowerCAmelCase__ ( a__ , a__=False ) ->List[Any]:
    '''
    Split each attention layer's fused in-projection (q/k/v stacked into one
    256*3-row matrix) into separate query/key/value slices for the HF layout.

    NOTE(review): every slice below is assigned to the rebound local
    `_UpperCamelCase` rather than back into the state dict; the destination
    keys (upstream: `encoder.layers.{i}.self_attn.{q,k,v}_proj.*` etc.) are
    not recoverable from this excerpt — verify against the original script.
    '''
    _UpperCamelCase = ""
    if is_panoptic:
        _UpperCamelCase = "detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        _UpperCamelCase = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
        _UpperCamelCase = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        _UpperCamelCase = in_proj_weight[:256, :]
        _UpperCamelCase = in_proj_bias[:256]
        _UpperCamelCase = in_proj_weight[256:512, :]
        _UpperCamelCase = in_proj_bias[256:512]
        _UpperCamelCase = in_proj_weight[-256:, :]
        _UpperCamelCase = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        _UpperCamelCase = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
        _UpperCamelCase = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        _UpperCamelCase = in_proj_weight[:256, :]
        _UpperCamelCase = in_proj_bias[:256]
        _UpperCamelCase = in_proj_weight[256:512, :]
        _UpperCamelCase = in_proj_bias[256:512]
        _UpperCamelCase = in_proj_weight[-256:, :]
        _UpperCamelCase = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        _UpperCamelCase = state_dict.pop(
            f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
        _UpperCamelCase = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        _UpperCamelCase = in_proj_weight_cross_attn[:256, :]
        _UpperCamelCase = in_proj_bias_cross_attn[:256]
        _UpperCamelCase = in_proj_weight_cross_attn[256:512, :]
        _UpperCamelCase = in_proj_bias_cross_attn[256:512]
        _UpperCamelCase = in_proj_weight_cross_attn[-256:, :]
        _UpperCamelCase = in_proj_bias_cross_attn[-256:]
def lowerCAmelCase__ ( ) ->Tuple:
    '''
    Download the standard COCO validation image (two cats on a couch) used to
    verify the converted model. The mangled source read an unbound name for
    both the URL and the `stream` flag inside this zero-argument function.
    '''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read directly from the response's raw file object
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def lowerCAmelCase__ ( a__ , a__=None , a__=False ) ->Dict:
    '''
    Convert a torch-hub DETR checkpoint to the Hugging Face format, verify it
    against the original on a test image, and optionally save/push it.

    NOTE(review): many names read below (`model_name`, `state_dict`, `detr`,
    `model`, `processor`, `pixel_values`, ...) are never bound — assignments
    target the mangled `a__`/`_UpperCamelCase`; verify the intended bindings.
    '''
    _UpperCamelCase , _UpperCamelCase = get_detr_config(a__ )
    # load original model from torch hub
    _UpperCamelCase = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f'Converting model {model_name}...' )
    _UpperCamelCase = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=a__ ).eval()
    _UpperCamelCase = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(a__ ):
        if is_panoptic:
            _UpperCamelCase = "detr." + src
        rename_key(a__ , a__ , a__ )
    # query, key and value matrices need special treatment
    read_in_q_k_v(a__ , is_panoptic=a__ )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    _UpperCamelCase = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            # Panoptic heads keep classifier/bbox keys as-is; mask-head keys are skipped.
            if (
                key.startswith("detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                _UpperCamelCase = state_dict.pop(a__ )
                _UpperCamelCase = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                _UpperCamelCase = state_dict.pop(a__ )
                _UpperCamelCase = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                continue
            else:
                _UpperCamelCase = state_dict.pop(a__ )
                _UpperCamelCase = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                _UpperCamelCase = state_dict.pop(a__ )
                _UpperCamelCase = val
    # finally, create HuggingFace model and load state dict
    _UpperCamelCase = DetrForSegmentation(a__ ) if is_panoptic else DetrForObjectDetection(a__ )
    model.load_state_dict(a__ )
    model.eval()
    # verify our conversion on an image
    _UpperCamelCase = "coco_panoptic" if is_panoptic else "coco_detection"
    _UpperCamelCase = DetrImageProcessor(format=a__ )
    _UpperCamelCase = processor(images=prepare_img() , return_tensors="pt" )
    _UpperCamelCase = encoding["pixel_values"]
    _UpperCamelCase = detr(a__ )
    _UpperCamelCase = model(a__ )
    # Outputs must match the torch-hub model within tight tolerances.
    assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1e-3 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1e-3 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
        Path(a__ ).mkdir(exist_ok=a__ )
        model.save_pretrained(a__ )
        processor.save_pretrained(a__ )
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub..." )
        model.push_to_hub(f'nielsr/{model_name}' )
        processor.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
    # CLI entry point for the DETR torch-hub -> Hugging Face conversion.
    lowerCamelCase__ = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_name''',
        default='''detr-resnet-50''',
        type=str,
        choices=['''detr-resnet-50''', '''detr-resnet-101'''],
        help='''Name of the DETR model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
    # NOTE(review): `parser`/`args` are read but the assignments target the
    # mangled `lowerCamelCase__`; verify the intended names.
    lowerCamelCase__ = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 82 | import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
# Name of the SentencePiece model file inside a saved tokenizer directory.
lowerCamelCase__ = {'''vocab_file''': '''spiece.model'''}
# Download URLs for the SentencePiece models of the canonical T5 checkpoints.
lowerCamelCase__ = {
    '''vocab_file''': {
        '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
        '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
        '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
        '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
        '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
    }
}
# Historical (incorrect) max input sizes, kept for backward compatibility.
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase__ = {
    '''t5-small''': 512,
    '''t5-base''': 512,
    '''t5-large''': 512,
    '''t5-3b''': 512,
    '''t5-11b''': 512,
}
# SentencePiece's word-boundary marker character.
lowerCamelCase__ = '''▁'''
class _UpperCAmelCase ( lowerCAmelCase ):
    '''
    T5 tokenizer backed by SentencePiece (the class continues beyond this
    excerpt). Appends `extra_ids` sentinel tokens (`<extra_id_0>`, ...) after
    the SentencePiece vocabulary.
    '''

    # NOTE(review): these four attributes share one mangled name; upstream they
    # are vocab_files_names, pretrained_vocab_files_map, max_model_input_sizes
    # and model_input_names — verify before relying on them.
    __A = VOCAB_FILES_NAMES
    __A = PRETRAINED_VOCAB_FILES_MAP
    __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __A = ['''input_ids''', '''attention_mask''']
    def __init__( self : Tuple , lowercase_ : int , lowercase_ : str="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Dict="<pad>" , lowercase_ : Tuple=100 , lowercase_ : str=None , lowercase_ : Optional[Dict[str, Any]] = None , lowercase_ : str=True , **lowercase_ : Optional[Any] , ) -> None:
        """
        Load the SentencePiece model and register the `<extra_id_*>` sentinels.
        NOTE(review): the mangled signature reuses one parameter name (a
        SyntaxError) while the body reads `extra_ids`,
        `additional_special_tokens`, `legacy`, `sp_model_kwargs` and
        `vocab_file` — verify the intended parameter names.
        """
        if extra_ids > 0 and additional_special_tokens is None:
            # Synthesize the sentinel tokens when the caller supplied none.
            _UpperCamelCase = [f'<extra_id_{i}>' for i in range(lowercase_)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            _UpperCamelCase = len(set(filter(lambda lowercase_: bool("extra_id" in str(lowercase_)) , lowercase_)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")
        if legacy:
            # Pre-#24565 tokenization behaviour: warn once per class.
            logger.warning_once(
                f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
        _UpperCamelCase = legacy
        _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase_ , **lowercase_ , )
        _UpperCamelCase = vocab_file
        _UpperCamelCase = extra_ids
        _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(lowercase_)
@staticmethod
def __UpperCAmelCase ( lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : str) -> Any:
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCamelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f' {pretrained_model_name_or_path} automatically truncating your input to'
f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , lowercase_ , )
return max_model_length
@property
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
return self.sp_model.get_piece_size() + self._extra_ids
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __UpperCAmelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowercase_)) + [1]
return ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1]
def __UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return list(
set(filter(lambda lowercase_: bool(re.search(R"<extra_id_\d+>" , lowercase_)) is not None , self.additional_special_tokens)))
def __UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
return [self._convert_token_to_id(lowercase_) for token in self.get_sentinel_tokens()]
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[int]) -> List[int]:
"""simple docstring"""
if len(lowercase_) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
" eos tokens being added.")
return token_ids
else:
return token_ids + [self.eos_token_id]
def __UpperCAmelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCamelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCamelCase = self._add_eos_if_not_present(lowercase_)
if token_ids_a is None:
return token_ids_a
else:
_UpperCamelCase = self._add_eos_if_not_present(lowercase_)
return token_ids_a + token_ids_a
def __getstate__( self : Tuple) -> Any:
"""simple docstring"""
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
return state
def __setstate__( self : Optional[Any] , lowercase_ : Any) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __UpperCAmelCase ( self : int , lowercase_ : "TextInput" , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
if not self.legacy:
_UpperCamelCase = SPIECE_UNDERLINE + text.replace(lowercase_ , " ")
return super().tokenize(lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : int , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
if not self.legacy:
_UpperCamelCase = text.startswith(lowercase_)
if is_first:
_UpperCamelCase = text[1:]
_UpperCamelCase = self.sp_model.encode(lowercase_ , out_type=lowercase_)
if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(lowercase_):
_UpperCamelCase = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
return tokens
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
if token.startswith("<extra_id_"):
_UpperCamelCase = re.match(R"<extra_id_(\d+)>" , lowercase_)
_UpperCamelCase = int(match.group(1))
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(lowercase_)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> int:
"""simple docstring"""
if index < self.sp_model.get_piece_size():
_UpperCamelCase = self.sp_model.IdToPiece(lowercase_)
else:
_UpperCamelCase = f'<extra_id_{self.vocab_size - 1 - index}>'
return token
def __UpperCAmelCase ( self : Dict , lowercase_ : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = ""
_UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase_) + token
_UpperCamelCase = True
_UpperCamelCase = []
else:
current_sub_tokens.append(lowercase_)
_UpperCamelCase = False
out_string += self.sp_model.decode(lowercase_)
return out_string.strip()
def __UpperCAmelCase ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase_):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
_UpperCamelCase = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowercase_)
elif not os.path.isfile(self.vocab_file):
with open(lowercase_ , "wb") as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase_)
return (out_vocab_file,)
| 82 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''facebook/bart-large-mnli'''
__A = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
__A = '''text_classifier'''
__A = AutoTokenizer
__A = AutoModelForSequenceClassification
__A = ['''text''', ['''text''']]
__A = ['''text''']
def __UpperCAmelCase ( self : Any) -> Dict:
"""simple docstring"""
super().setup()
_UpperCamelCase = self.model.config
_UpperCamelCase = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("entail"):
_UpperCamelCase = int(lowercase_)
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")
def __UpperCAmelCase ( self : Any , lowercase_ : List[Any] , lowercase_ : Tuple) -> Dict:
"""simple docstring"""
_UpperCamelCase = labels
return self.pre_processor(
[text] * len(lowercase_) , [f'This example is {label}' for label in labels] , return_tensors="pt" , padding="max_length" , )
def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCamelCase = outputs.logits
_UpperCamelCase = torch.argmax(logits[:, 2]).item()
return self._labels[label_id]
| 82 | from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
return getitem, k
def lowerCAmelCase__ ( a__ , a__ ) ->Tuple:
'''simple docstring'''
return setitem, k, v
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
return delitem, k
def lowerCAmelCase__ ( a__ , a__ , *a__ ) ->List[str]:
'''simple docstring'''
try:
return fun(a__ , *a__ ), None
except Exception as e:
return None, e
lowerCamelCase__ = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
lowerCamelCase__ = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
lowerCamelCase__ = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
lowerCamelCase__ = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
lowerCamelCase__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCamelCase__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def lowerCAmelCase__ ( a__ ) ->Dict:
'''simple docstring'''
_UpperCamelCase = HashMap(initial_block_size=4 )
_UpperCamelCase = {}
for _, (fun, *args) in enumerate(a__ ):
_UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
_UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
assert my_res == py_res
assert str(a__ ) == str(a__ )
assert set(a__ ) == set(a__ )
assert len(a__ ) == len(a__ )
assert set(my.items() ) == set(py.items() )
def lowerCAmelCase__ ( ) ->List[Any]:
'''simple docstring'''
def is_public(a__ ) -> bool:
return not name.startswith("_" )
_UpperCamelCase = {name for name in dir({} ) if is_public(a__ )}
_UpperCamelCase = {name for name in dir(HashMap() ) if is_public(a__ )}
assert dict_public_names > hash_public_names
| 82 | 1 |
def lowerCAmelCase__ ( a__ ) ->Tuple:
'''simple docstring'''
_UpperCamelCase = 0
_UpperCamelCase = len(a__ )
for i in range(n - 1 ):
for j in range(i + 1 , a__ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def lowerCAmelCase__ ( a__ ) ->Union[str, Any]:
'''simple docstring'''
if len(a__ ) <= 1:
return arr, 0
_UpperCamelCase = len(a__ ) // 2
_UpperCamelCase = arr[0:mid]
_UpperCamelCase = arr[mid:]
_UpperCamelCase , _UpperCamelCase = count_inversions_recursive(a__ )
_UpperCamelCase , _UpperCamelCase = count_inversions_recursive(a__ )
_UpperCamelCase , _UpperCamelCase = _count_cross_inversions(a__ , a__ )
_UpperCamelCase = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def lowerCAmelCase__ ( a__ , a__ ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase = []
_UpperCamelCase = _UpperCamelCase = _UpperCamelCase = 0
while i < len(a__ ) and j < len(a__ ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(a__ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(a__ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def lowerCAmelCase__ ( ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_UpperCamelCase = count_inversions_bf(a__ )
_UpperCamelCase , _UpperCamelCase = count_inversions_recursive(a__ )
assert num_inversions_bf == num_inversions_recursive == 8
print("number of inversions = " , a__ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
_UpperCamelCase = count_inversions_bf(a__ )
_UpperCamelCase , _UpperCamelCase = count_inversions_recursive(a__ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , a__ )
# an empty list should also have zero inversions
_UpperCamelCase = []
_UpperCamelCase = count_inversions_bf(a__ )
_UpperCamelCase , _UpperCamelCase = count_inversions_recursive(a__ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , a__ )
if __name__ == "__main__":
main()
| 82 | import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = KandinskyVaaControlnetImgaImgPipeline
__A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
__A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
__A = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__A = False
@property
def __UpperCAmelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
return 32
@property
def __UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
return 32
@property
def __UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
return 100
@property
def __UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_UpperCamelCase = UNetaDConditionModel(**lowercase_)
return model
@property
def __UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = VQModel(**self.dummy_movq_kwargs)
return model
def __UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
_UpperCamelCase = self.dummy_unet
_UpperCamelCase = self.dummy_movq
_UpperCamelCase = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
_UpperCamelCase = DDIMScheduler(**lowercase_)
_UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __UpperCAmelCase ( self : str , lowercase_ : Dict , lowercase_ : List[str]=0) -> List[str]:
"""simple docstring"""
_UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase_)).to(lowercase_)
_UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
lowercase_)
# create init_image
_UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
_UpperCamelCase = Image.fromarray(np.uinta(lowercase_)).convert("RGB").resize((256, 256))
# create hint
_UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
if str(lowercase_).startswith("mps"):
_UpperCamelCase = torch.manual_seed(lowercase_)
else:
_UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(lowercase_)
_UpperCamelCase = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def __UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCamelCase = "cpu"
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**lowercase_)
_UpperCamelCase = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = pipe(**self.get_dummy_inputs(lowercase_))
_UpperCamelCase = output.images
_UpperCamelCase = pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
_UpperCamelCase = init_image.resize((512, 512))
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png")
_UpperCamelCase = torch.from_numpy(np.array(lowercase_)).float() / 2_55.0
_UpperCamelCase = hint.permute(2 , 0 , 1).unsqueeze(0)
_UpperCamelCase = "A robot, 4k photo"
_UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
pipe_prior.to(lowercase_)
_UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
_UpperCamelCase = pipeline.to(lowercase_)
pipeline.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.Generator(device="cpu").manual_seed(0)
_UpperCamelCase , _UpperCamelCase = pipe_prior(
lowercase_ , image=lowercase_ , strength=0.85 , generator=lowercase_ , negative_prompt="" , ).to_tuple()
_UpperCamelCase = pipeline(
image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , hint=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
_UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_)
| 82 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase ):
'''simple docstring'''
__A = '''nat'''
__A = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : int , lowercase_ : Optional[Any]=4 , lowercase_ : List[str]=3 , lowercase_ : int=64 , lowercase_ : Tuple=[3, 4, 6, 5] , lowercase_ : Tuple=[2, 4, 8, 16] , lowercase_ : Optional[Any]=7 , lowercase_ : List[Any]=3.0 , lowercase_ : Union[str, Any]=True , lowercase_ : Tuple=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any="gelu" , lowercase_ : str=0.02 , lowercase_ : Dict=1e-5 , lowercase_ : Dict=0.0 , lowercase_ : Tuple=None , lowercase_ : Tuple=None , **lowercase_ : Tuple , ) -> Any:
"""simple docstring"""
super().__init__(**lowercase_)
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = embed_dim
_UpperCamelCase = depths
_UpperCamelCase = len(lowercase_)
_UpperCamelCase = num_heads
_UpperCamelCase = kernel_size
_UpperCamelCase = mlp_ratio
_UpperCamelCase = qkv_bias
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = drop_path_rate
_UpperCamelCase = hidden_act
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCamelCase = int(embed_dim * 2 ** (len(lowercase_) - 1))
_UpperCamelCase = layer_scale_init_value
_UpperCamelCase = ["stem"] + [f'stage{idx}' for idx in range(1 , len(lowercase_) + 1)]
_UpperCamelCase , _UpperCamelCase = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names)
| 82 | def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
assert (
isinstance(a__ , a__ ) and number_of_steps > 0
), f'number_of_steps needs to be positive integer, your input {number_of_steps}'
if number_of_steps == 1:
return 1
_UpperCamelCase , _UpperCamelCase = 1, 1
for _ in range(number_of_steps - 1 ):
_UpperCamelCase , _UpperCamelCase = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 1 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(lowercase_ , "hidden_sizes"))
self.parent.assertTrue(hasattr(lowercase_ , "num_attention_heads"))
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : List[Any] , lowercase_ : Optional[int]=13 , lowercase_ : Union[str, Any]=64 , lowercase_ : Union[str, Any]=3 , lowercase_ : Union[str, Any]=3 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=1 , lowercase_ : Dict=16 , lowercase_ : Optional[int]=[128, 256, 384] , lowercase_ : int=[4, 6, 8] , lowercase_ : Dict=[2, 3, 4] , lowercase_ : Union[str, Any]=[16, 16, 16] , lowercase_ : Optional[int]=0 , lowercase_ : Dict=[2, 2, 2] , lowercase_ : List[str]=[2, 2, 2] , lowercase_ : Dict=0.02 , lowercase_ : Dict=True , lowercase_ : str=True , lowercase_ : Optional[Any]=2 , ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = num_channels
_UpperCamelCase = kernel_size
_UpperCamelCase = stride
_UpperCamelCase = padding
_UpperCamelCase = hidden_sizes
_UpperCamelCase = num_attention_heads
_UpperCamelCase = depths
_UpperCamelCase = key_dim
_UpperCamelCase = drop_path_rate
_UpperCamelCase = patch_size
_UpperCamelCase = attention_ratio
_UpperCamelCase = mlp_ratio
_UpperCamelCase = initializer_range
_UpperCamelCase = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = num_labels
_UpperCamelCase = initializer_range
def __UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels)
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def __UpperCAmelCase ( self : List[str] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Union[str, Any]) -> int:
"""simple docstring"""
_UpperCamelCase = LevitModel(config=lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(lowercase_)
_UpperCamelCase = (self.image_size, self.image_size)
_UpperCamelCase , _UpperCamelCase = image_size[0], image_size[1]
for _ in range(4):
_UpperCamelCase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
_UpperCamelCase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]) , )
def __UpperCAmelCase ( self : str , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.num_labels
_UpperCamelCase = LevitForImageClassification(lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
__A = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
__A = False
def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = LevitModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37)
def __UpperCAmelCase ( self : Any) -> Tuple:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
return
@unittest.skip(reason="Levit does not use inputs_embeds")
def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="Levit does not support input and output embeddings")
def __UpperCAmelCase ( self : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="Levit does not output attentions")
def __UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
pass
def __UpperCAmelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowercase_)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase_)
    def __UpperCAmelCase ( self : Optional[Any]) -> List[Any]:
        """Verify hidden-state count and the (H*W, hidden) shape of the first block.

        Spatial dims are shrunk 4 times with the conv formula
        floor((x + 2*padding - kernel) / stride) + 1 before the check.

        NOTE(review): the inner def repeats the parameter name `lowercase_`
        three times (a SyntaxError), and locals (`outputs`, `hidden_states`,
        `height`, `width`, `inputs_dict`, ...) are read but only
        `_UpperCamelCase` is assigned — mangled copy.
        """
        def check_hidden_states_output(lowercase_ : Any , lowercase_ : Dict , lowercase_ : Any):
            _UpperCamelCase = model_class(lowercase_)
            model.to(lowercase_)
            model.eval()
            with torch.no_grad():
                _UpperCamelCase = model(**self._prepare_for_class(lowercase_ , lowercase_))
            _UpperCamelCase = outputs.hidden_states
            _UpperCamelCase = len(self.model_tester.depths) + 1
            self.assertEqual(len(lowercase_) , lowercase_)
            _UpperCamelCase = (self.model_tester.image_size, self.model_tester.image_size)
            _UpperCamelCase , _UpperCamelCase = image_size[0], image_size[1]
            for _ in range(4):
                _UpperCamelCase = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1)
                _UpperCamelCase = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] , )
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCamelCase = True
            check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _UpperCamelCase = True
            check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def __UpperCAmelCase ( self : Union[str, Any]) -> int:
        """Skipped: see the decorator reason; body intentionally empty."""
        pass
    def __UpperCAmelCase ( self : List[str] , lowercase_ : Any , lowercase_ : str , lowercase_ : int=False) -> List[Any]:
        """Drop `labels` for the teacher-distillation model, which is inference-only.

        NOTE(review): the three parameters share the name `lowercase_`
        (a SyntaxError), and `return_labels`/`model_class`/`inputs_dict` are
        read but never bound — mangled signature and locals.
        """
        _UpperCamelCase = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def __UpperCAmelCase ( self : Union[str, Any]) -> Dict:
        """Run the model-tester's basic forward check.

        NOTE(review): `lowercase_` is unbound — the prepared inputs were
        assigned to `_UpperCamelCase`.
        """
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_)
    def __UpperCAmelCase ( self : Tuple) -> List[Any]:
        """Run the model-tester's image-classification head check.

        NOTE(review): `lowercase_` is unbound — the prepared inputs were
        assigned to `_UpperCamelCase`.
        """
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase_)
    def __UpperCAmelCase ( self : Tuple) -> str:
        """Train each trainable model class for one step and backprop the loss.

        The teacher-distillation class is skipped because it is inference-only.
        NOTE(review): mangled locals — `config`/`inputs_dict`/`model`/`loss`
        are read but only `_UpperCamelCase` is ever assigned; `lowercase_` is
        unbound.
        """
        if not self.model_tester.is_training:
            return
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        _UpperCamelCase = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(lowercase_)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            _UpperCamelCase = model_class(lowercase_)
            model.to(lowercase_)
            model.train()
            _UpperCamelCase = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
            _UpperCamelCase = model(**lowercase_).loss
            loss.backward()
    def __UpperCAmelCase ( self : str) -> str:
        """One training step per model class with gradient checkpointing enabled.

        NOTE(review): same mangled-locals pattern as the plain training test —
        `model`/`loss` are read but never bound; `lowercase_` is unbound.
        """
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        _UpperCamelCase = False
        _UpperCamelCase = True
        for model_class in self.all_model_classes:
            if model_class in get_values(lowercase_) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            _UpperCamelCase = model_class(lowercase_)
            model.gradient_checkpointing_enable()
            model.to(lowercase_)
            model.train()
            _UpperCamelCase = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
            _UpperCamelCase = model(**lowercase_).loss
            loss.backward()
    def __UpperCAmelCase ( self : Optional[int]) -> Any:
        """Check loss computation for each `problem_type` without broadcast warnings.

        NOTE(review): mangled locals throughout — `problem_types`, `model`,
        `inputs`, `loss` are read but only `_UpperCamelCase` is assigned, and
        `lowercase_` is unbound.
        """
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        _UpperCamelCase = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(lowercase_),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}'):
                    _UpperCamelCase = problem_type["title"]
                    _UpperCamelCase = problem_type["num_labels"]
                    _UpperCamelCase = model_class(lowercase_)
                    model.to(lowercase_)
                    model.train()
                    _UpperCamelCase = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
                    if problem_type["num_labels"] > 1:
                        _UpperCamelCase = inputs["labels"].unsqueeze(1).repeat(1 , problem_type["num_labels"])
                    _UpperCamelCase = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=lowercase_) as warning_list:
                        _UpperCamelCase = model(**lowercase_).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f'Something is going wrong in the regression problem: intercepted {w.message}')
                    loss.backward()
    @slow
    def __UpperCAmelCase ( self : str) -> Optional[int]:
        """Smoke-test loading the first pretrained Levit checkpoint.

        NOTE(review): the loaded model goes to `_UpperCamelCase` while
        `lowercase_` (unbound) is asserted — mangled locals.
        """
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCamelCase = LevitModel.from_pretrained(lowercase_)
            self.assertIsNotNone(lowercase_)
def lowerCAmelCase__ ( ) ->List[str]:
    '''Load the standard COCO "two cats" fixture image used by integration tests.

    NOTE(review): the opened image is bound to `_UpperCamelCase` but returned
    as `image` (unbound) — mangled locals.
    '''
    _UpperCamelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
    '''Integration test: classify the COCO fixture image with a pretrained Levit.

    NOTE(review): both methods share one mangled name (the second shadows the
    first, so `self.default_image_processor` is unbound), and locals
    (`model`, `image_processor`, `image`, `inputs`, `outputs`, ...) are read
    but only `_UpperCamelCase` is assigned.
    '''

    @cached_property
    def __UpperCAmelCase ( self : Any) -> int:
        """Default image processor for the checkpoint under test."""
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
        """Forward pass; check the (1, 1000) logit shape and three reference logits."""
        _UpperCamelCase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            lowercase_)
        _UpperCamelCase = self.default_image_processor
        _UpperCamelCase = prepare_img()
        _UpperCamelCase = image_processor(images=lowercase_ , return_tensors="pt").to(lowercase_)
        # forward pass
        with torch.no_grad():
            _UpperCamelCase = model(**lowercase_)
        # verify the logits
        _UpperCamelCase = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape , lowercase_)
        _UpperCamelCase = torch.tensor([1.04_48, -0.37_45, -1.83_17]).to(lowercase_)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
| 82 | from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class _UpperCAmelCase ( ChunkPipeline ):
    """Zero-shot object detection pipeline: score free-text candidate labels
    against an image and return detected boxes.

    Reconstructed from a mangled copy: the original had duplicate parameter
    names (a SyntaxError), five methods collapsed onto a single name, and
    locals assigned to a throwaway name but read back under their real names.
    The decorator argument and base class are restored from this file's own
    imports (`PIPELINE_INIT_ARGS`, `ChunkPipeline`).
    """

    def __init__( self , **kwargs) -> None:
        """Validate the backend: PyTorch only, vision extras required."""
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self , "vision")
        # Restored: the mangled copy passed an unbound name here; the mapping
        # is imported at the top of this file under the torch guard.
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__( self , image: Union[str, "Image.Image", List[Dict[str, Any]]] , candidate_labels: Union[str, List[str]] = None , **kwargs , ):
        """Detect objects described by `candidate_labels` in `image`.

        `image` may also be a list of {"image", "candidate_labels"} dicts;
        `text_queries` is accepted as a legacy alias for `candidate_labels`.
        """
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image , (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs)
        return results

    def _sanitize_parameters( self , **kwargs):
        """Route `threshold` and `top_k` kwargs to the postprocess step."""
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess( self , inputs):
        """Yield one tokenized (label, image) chunk per candidate label."""
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels , str):
            candidate_labels = candidate_labels.split(",")
        # int64 restored from the mangled `torch.intaa`; pixel sizes fit easily.
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int64)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework)
            image_features = self.image_processor(image , return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward( self , model_inputs):
        """Run the model on one chunk, carrying the bookkeeping keys through."""
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess( self , model_outputs , threshold=0.1 , top_k=None):
        """Convert raw detections to {"score", "label", "box"} dicts sorted by score."""
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                results.append({"score": score, "label": label, "box": box})
        # Fix: the mangled copy's sort lambda read `x` but named its parameter
        # differently (NameError), and `reverse` was an unbound name.
        results = sorted(results , key=lambda x: x["score"] , reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box( self , box: "torch.Tensor") -> Dict[str, int]:
        """Turn an [xmin, ymin, xmax, ymax] tensor into a dict of ints."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 82 | 1 |
def lowerCAmelCase__ ( density: float , bulk_modulus: float ) ->float:
    '''Return the speed of sound in a fluid via Newton-Laplace: sqrt(K / rho).

    Restored from a mangled signature in which both parameters were named
    `a__` (a SyntaxError) while the body read `density` and `bulk_modulus`.

    :param density: fluid density in kg/m^3, must be positive
    :param bulk_modulus: bulk modulus in Pa, must be positive
    :raises ValueError: on non-positive density or bulk modulus

    >>> lowerCAmelCase__(1, 4)
    2.0
    '''
    if density <= 0:
        raise ValueError("Impossible fluid density" )
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus" )
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 82 | import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _UpperCAmelCase ( lowerCAmelCase ):
    '''Repo-hygiene checks over ./datasets/*.py: require explicit encoding on
    `open(...)` and forbid bare `print(...)` calls.

    NOTE(review): the base class `lowerCAmelCase` is unbound, the four methods
    share one mangled name (each def shadows the previous), and locals bound
    to `_UpperCamelCase` are read back under their original names.
    '''

    def __UpperCAmelCase ( self : List[str] , lowercase_ : str) -> str:
        """Return a regex match if the file opens files without mode/encoding.

        NOTE(review): `regexp.search(lowercase_)` scans the *path string*, not
        the content read just above — presumably mangled; confirm upstream.
        """
        with open(lowercase_ , encoding="utf-8") as input_file:
            _UpperCamelCase = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            _UpperCamelCase = input_file.read()
            _UpperCamelCase = regexp.search(lowercase_)
        return match

    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : str) -> int:
        """Return the first real `print(` match, ignoring comments and docstrings."""
        with open(lowercase_ , encoding="utf-8") as input_file:
            _UpperCamelCase = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL)
            _UpperCamelCase = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            _UpperCamelCase = regexp.finditer(lowercase_)
        _UpperCamelCase = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def __UpperCAmelCase ( self : int) -> int:
        """Fail if any dataset script opens a file without utf-8 encoding."""
        _UpperCamelCase = Path("./datasets")
        _UpperCamelCase = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(lowercase_)):
                raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')

    def __UpperCAmelCase ( self : str) -> str:
        """Fail if any dataset script uses a bare print statement."""
        _UpperCamelCase = Path("./datasets")
        _UpperCamelCase = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(lowercase_)):
                raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
| 82 | 1 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCAmelCase__ ( class_prompt , class_data_dir , num_class_images ) ->int:
    '''Download up to `num_class_images` real regularization images matching
    `class_prompt` from the LAION-400M KNN service into `class_data_dir`.

    Writes images to `{class_data_dir}/images/` plus caption/url/path index
    files. Restored from a mangled copy: the three parameters were all named
    `a__` (a SyntaxError), the three `with open(...)` targets collided on one
    name, and every local was assigned to `_UpperCamelCase` but read back
    under its original name.

    :param class_prompt: text prompt to query the KNN index with
    :param class_data_dir: output directory (created if missing)
    :param num_class_images: number of images to download
    '''
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 )

    os.makedirs(f'{class_data_dir}/images' , exist_ok=True )
    if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
        return

    # Grow the query size until the service returns enough candidates
    # (capped at 10k images per query).
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 , )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images" , total=num_class_images )

    with open(f'{class_data_dir}/caption.txt' , "w" ) as fa, open(f'{class_data_dir}/urls.txt' , "w" ) as fb, open(
        f'{class_data_dir}/images.txt' , "w" ) as fc:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"] )
                if img.status_code == 200:
                    # Decoding validates that the payload is a usable image.
                    _ = Image.open(BytesIO(img.content ) )
                    with open(f'{class_data_dir}/images/{total}.jpg' , "wb" ) as f:
                        f.write(img.content )
                    fa.write(images["caption"] + "\n" )
                    fb.write(images["url"] + "\n" )
                    fc.write(f'{class_data_dir}/images/{total}.jpg' + "\n" )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                # best-effort: skip any candidate that fails to download/decode
                continue
    return
def lowerCAmelCase__ ( ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = argparse.ArgumentParser("" , add_help=a__ )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=a__ , type=a__ )
parser.add_argument("--class_data_dir" , help="path to save images" , required=a__ , type=a__ )
parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=a__ )
return parser.parse_args()
if __name__ == "__main__":
    # NOTE(review): `parse_args`, `retrieve` and `args` are all unbound here —
    # both helpers above were renamed to `lowerCAmelCase__` (the second shadows
    # the first), so this mangled entry point cannot run as written.
    lowerCamelCase__ = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 82 | import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : str = None , lowercase_ : uuid.UUID = None , lowercase_ : List[Any]=None , lowercase_ : int=None) -> Dict:
"""simple docstring"""
if not conversation_id:
_UpperCamelCase = uuid.uuida()
if past_user_inputs is None:
_UpperCamelCase = []
if generated_responses is None:
_UpperCamelCase = []
_UpperCamelCase = conversation_id
_UpperCamelCase = past_user_inputs
_UpperCamelCase = generated_responses
_UpperCamelCase = text
def __eq__( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : str , lowercase_ : bool = False) -> Any:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".')
_UpperCamelCase = text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
else:
_UpperCamelCase = text
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
_UpperCamelCase = None
def __UpperCAmelCase ( self : Dict , lowercase_ : str) -> Optional[Any]:
"""simple docstring"""
self.generated_responses.append(lowercase_)
def __UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Union[str, Any]) -> int:
"""simple docstring"""
_UpperCamelCase = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
_UpperCamelCase = "user" if is_user else "bot"
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
    lowerCAmelCase, R'''
    min_length_for_response (`int`, *optional*, defaults to 32):
        The minimum length (in number of tokens) for a response.
    minimum_tokens (`int`, *optional*, defaults to 10):
        The minimum length of tokens to leave for a response.
    ''', )
class _UpperCAmelCase ( lowerCAmelCase ):
    '''Multi-turn conversational pipeline (Conversation objects in and out).

    NOTE(review): this copy is mangled — the decorator argument and base class
    `lowerCAmelCase` are unbound, the hook methods all share one name (so the
    base-pipeline hooks `_sanitize_parameters`/`preprocess`/`_forward`/
    `postprocess` effectively resolve to the last def only), and locals bound
    to `_UpperCamelCase` are read back under their original names.
    '''

    def __init__( self : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str) -> List[str]:
        """Initialize; fall back to EOS as the pad token when none is set.

        NOTE(review): duplicate `lowercase_` parameter names are a
        SyntaxError, and the pad token goes to a throwaway local instead of
        `self.tokenizer.pad_token`.
        """
        super().__init__(*lowercase_ , **lowercase_)
        if self.tokenizer.pad_token_id is None:
            _UpperCamelCase = self.tokenizer.eos_token

    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : str) -> Tuple:
        """Split kwargs into preprocess/forward/postprocess parameter dicts.

        NOTE(review): all three dicts and every conditional assignment
        collapse onto `_UpperCamelCase`, so the returned names are unbound.
        """
        _UpperCamelCase = {}
        _UpperCamelCase = {}
        _UpperCamelCase = {}
        if min_length_for_response is not None:
            _UpperCamelCase = min_length_for_response
        if minimum_tokens is not None:
            _UpperCamelCase = minimum_tokens
        if "max_length" in generate_kwargs:
            _UpperCamelCase = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            _UpperCamelCase = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(lowercase_)
        return preprocess_params, forward_params, postprocess_params

    def __call__( self : Any , lowercase_ : Union[Conversation, List[Conversation]] , lowercase_ : str=0 , **lowercase_ : Union[str, Any]) -> Union[str, Any]:
        """Run the pipeline; unwrap a single-element result list."""
        _UpperCamelCase = super().__call__(lowercase_ , num_workers=lowercase_ , **lowercase_)
        if isinstance(lowercase_ , lowercase_) and len(lowercase_) == 1:
            return outputs[0]
        return outputs

    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Conversation , lowercase_ : Any=32) -> Dict[str, Any]:
        """Tokenize a Conversation into framework tensors for generation."""
        if not isinstance(lowercase_ , lowercase_):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. '
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer , "_build_conversation_input_ids"):
            _UpperCamelCase = self.tokenizer._build_conversation_input_ids(lowercase_)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            _UpperCamelCase = self._legacy_parse_and_tokenize(lowercase_)
        if self.framework == "pt":
            _UpperCamelCase = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            _UpperCamelCase = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[int]=10 , **lowercase_ : Dict) -> List[str]:
        """Generate a reply, trimming history so `minimum_tokens` remain for it."""
        _UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
        _UpperCamelCase = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})')
            _UpperCamelCase = max_length - minimum_tokens
            _UpperCamelCase = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                _UpperCamelCase = model_inputs["attention_mask"][:, -trim:]
        _UpperCamelCase = model_inputs.pop("conversation")
        _UpperCamelCase = max_length
        _UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
        if self.model.config.is_encoder_decoder:
            # encoder-decoder models emit only the reply (skip the BOS token)
            _UpperCamelCase = 1
        else:
            # decoder-only models echo the prompt; slice it off
            _UpperCamelCase = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int=True) -> List[Any]:
        """Decode the generated ids and append the reply to the Conversation."""
        _UpperCamelCase = model_outputs["output_ids"]
        _UpperCamelCase = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
        _UpperCamelCase = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(lowercase_)
        return conversation

    def __UpperCAmelCase ( self : Any , lowercase_ : Conversation) -> Dict:
        """Fallback tokenization: join turns with EOS, truncate to model max length."""
        _UpperCamelCase = self.tokenizer.eos_token_id
        _UpperCamelCase = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_))
        if len(lowercase_) > self.tokenizer.model_max_length:
            _UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 82 | 1 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _UpperCAmelCase ( lowerCAmelCase ):
    '''Read-only fsspec filesystem over a Hugging Face Hub dataset repo (legacy).

    NOTE(review): the base `lowerCAmelCase` is unbound (presumably
    `AbstractFileSystem`, imported above), the two `__A` class fields collide
    on one name, the methods share one mangled name, and `__init__` binds to
    `_UpperCamelCase` instead of `self.repo_info`/`self.token`/`self.dir_cache`
    (which the other methods read).
    '''
    __A = ''''''
    __A = '''hf-legacy''' # "hf://"" is reserved for hffs

    def __init__( self : Optional[Any] , lowercase_ : Optional[DatasetInfo] = None , lowercase_ : Optional[str] = None , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
        """Store repo metadata and auth token; directory cache is built lazily.

        NOTE(review): duplicate `lowercase_` parameter names are a SyntaxError.
        """
        super().__init__(self , **lowercase_)
        _UpperCamelCase = repo_info
        _UpperCamelCase = token
        _UpperCamelCase = None

    def __UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
        """Populate `dir_cache`: a 'file' entry per sibling plus parent 'directory' entries."""
        if self.dir_cache is None:
            _UpperCamelCase = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                _UpperCamelCase = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(lowercase_): {"name": str(lowercase_), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    })

    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : str , lowercase_ : str = "rb" , **lowercase_ : Dict , ) -> Union[str, Any]:
        """Open a repo file by resolving its hub URL and delegating to fsspec."""
        if not isinstance(self.repo_info , lowercase_):
            raise NotImplementedError(f'Open is only implemented for dataset repositories, but got {self.repo_info}')
        _UpperCamelCase = hf_hub_url(self.repo_info.id , lowercase_ , revision=self.repo_info.sha)
        return fsspec.open(
            lowercase_ , mode=lowercase_ , headers=get_authentication_headers_for_url(lowercase_ , use_auth_token=self.token) , client_kwargs={"trust_env": True} , ).open()

    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : str , **lowercase_ : Tuple) -> str:
        """Return the cache entry for `path`, or raise FileNotFoundError."""
        self._get_dirs()
        _UpperCamelCase = self._strip_protocol(lowercase_)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(lowercase_)

    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : List[Any]=False , **lowercase_ : Any) -> int:
        """List the direct children of `path` from the cache (dicts or sorted names)."""
        self._get_dirs()
        _UpperCamelCase = PurePosixPath(path.strip("/"))
        _UpperCamelCase = {}
        for p, f in self.dir_cache.items():
            _UpperCamelCase = PurePosixPath(p.strip("/"))
            _UpperCamelCase = p.parent
            if root == path:
                _UpperCamelCase = f
        _UpperCamelCase = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
def lowerCAmelCase__ ( length: int = 50 ) ->int:
    '''Count the tilings of a 1 x `length` row using unit squares plus tiles
    of length 2, 3 and 4 (Project Euler problem 117), via bottom-up DP.

    Restored from a mangled copy: the DP table was assigned to a throwaway
    name but read as `ways_number`, and the parameter was read as `length`.

    >>> lowerCAmelCase__(4)
    8
    >>> lowerCAmelCase__(5)
    15
    '''
    # ways_number[i] = number of tilings of a row of length i (empty row: 1).
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
if __name__ == "__main__":
    # Fix: the helper above is named `lowerCAmelCase__`, not `solution`, so the
    # mangled original raised NameError here.
    print(F"{lowerCAmelCase__() = }")
| 82 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['''LongformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->int:
    '''Download real regularization images from the LAION-400M KNN service.

    NOTE(review): duplicate `a__` parameters are a SyntaxError, the three
    `with open(...) as fa` targets collide on one name, and locals bound to
    `_UpperCamelCase` are read back under their original names (`factor`,
    `num_images`, `client`, `class_images`, `count`, `total`, `pbar`,
    `images`, `img`) — a mangled copy of the same helper that also appears
    earlier in this dump.
    '''
    _UpperCamelCase = 1.5
    _UpperCamelCase = int(factor * num_class_images )
    _UpperCamelCase = ClipClient(
        url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=a__ , aesthetic_weight=0.1 )
    os.makedirs(f'{class_data_dir}/images' , exist_ok=a__ )
    if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
        return
    while True:
        _UpperCamelCase = client.query(text=a__ )
        if len(a__ ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            _UpperCamelCase = int(factor * num_images )
            _UpperCamelCase = ClipClient(
                url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=a__ , aesthetic_weight=0.1 , )
    _UpperCamelCase = 0
    _UpperCamelCase = 0
    _UpperCamelCase = tqdm(desc="downloading real regularization images" , total=a__ )
    with open(f'{class_data_dir}/caption.txt' , "w" ) as fa, open(f'{class_data_dir}/urls.txt' , "w" ) as fa, open(
        f'{class_data_dir}/images.txt' , "w" ) as fa:
        while total < num_class_images:
            _UpperCamelCase = class_images[count]
            count += 1
            try:
                _UpperCamelCase = requests.get(images["url"] )
                if img.status_code == 200:
                    _UpperCamelCase = Image.open(BytesIO(img.content ) )
                    with open(f'{class_data_dir}/images/{total}.jpg' , "wb" ) as f:
                        f.write(img.content )
                    fa.write(images["caption"] + "\n" )
                    fa.write(images["url"] + "\n" )
                    fa.write(f'{class_data_dir}/images/{total}.jpg' + "\n" )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
def lowerCAmelCase__ ( ) ->Optional[Any]:
    '''Build and parse the CLI arguments for the retrieval script.

    NOTE(review): `a__` is unbound, the parser is bound to `_UpperCamelCase`
    but read back as `parser`, and this def shadows the download helper above
    that shares its mangled name.
    '''
    _UpperCamelCase = argparse.ArgumentParser("" , add_help=a__ )
    parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=a__ , type=a__ )
    parser.add_argument("--class_data_dir" , help="path to save images" , required=a__ , type=a__ )
    parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=a__ )
    return parser.parse_args()
if __name__ == "__main__":
    # NOTE(review): `parse_args`, `retrieve` and `args` are all unbound — the
    # helpers above share one mangled name, so this entry point cannot run.
    lowerCamelCase__ = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 82 | 1 |
lowerCamelCase__ = 9.80665  # standard gravity, m/s^2


def lowerCAmelCase__ ( fluid_density: float , volume: float , gravity: float = lowerCamelCase__ ) ->float:
    '''Return the buoyant force (Archimedes' principle): rho * g * V.

    Restored from a mangled signature in which all three parameters were named
    `a__` (a SyntaxError) and the default referenced the renamed gravity
    constant; the body read `fluid_density`, `volume` and `gravity`.

    :param fluid_density: density of the displaced fluid in kg/m^3 (> 0)
    :param volume: displaced volume in m^3 (>= 0)
    :param gravity: gravitational acceleration in m/s^2 (> 0)
    :raises ValueError: on an impossible density, volume or gravity

    >>> lowerCAmelCase__(1000, 0)
    0.0
    '''
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density" )
    if volume < 0:
        raise ValueError("Impossible Object volume" )
    if gravity <= 0:
        raise ValueError("Impossible Gravity" )
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
| 82 | import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase__ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
lowerCamelCase__ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
lowerCamelCase__ = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    '''Accuracy metric for the MATH competition-math dataset.

    Canonicalizes LaTeX answers via `math_equivalence.is_equiv` before
    comparing, so e.g. "1/2" matches "\\frac{1}{2}".

    NOTE(review): the decorator reads _DESCRIPTION / _KWARGS_DESCRIPTION, but
    the module-level string constants above are all bound to
    `lowerCamelCase__` — looks like renaming damage; confirm.
    '''

    def __UpperCAmelCase ( self : Dict) -> Dict:
        """Declare the metric signature: string predictions vs. string references."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )

    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : str) -> Tuple:
        """Return {"accuracy": fraction of predictions equivalent to references}.

        NOTE(review): `n_correct` / `accuracy` are read below but only
        `_UpperCamelCase` is ever assigned — renaming damage; confirm.
        """
        _UpperCamelCase = 0.0
        for i, j in zip(lowercase_ , lowercase_):
            n_correct += 1.0 if math_equivalence.is_equiv(lowercase_ , lowercase_) else 0.0
        _UpperCamelCase = n_correct / len(lowercase_)
        return {
            "accuracy": accuracy,
        }
| 82 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class _UpperCAmelCase ( nn.Module ):
    '''Flax 2x nearest-neighbor upsampling block followed by a 3x3 same-padding conv.

    NOTE(review): the `__A = 42` / `__A = jnp.floataa` class attributes look
    like renaming damage (presumably `out_channels: int` and `dtype`); confirm.
    '''
    __A = 42
    __A = jnp.floataa

    def __UpperCAmelCase ( self : Tuple) -> int:
        """Build the 3x3 convolution applied after resizing."""
        # NOTE(review): bound to `_UpperCamelCase` but read as `self.conv` in
        # __call__ — renaming damage; confirm.
        _UpperCamelCase = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

    def __call__( self : Tuple , lowercase_ : str) -> str:
        """Upsample NHWC hidden states by 2x (nearest neighbor) and convolve."""
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = hidden_states.shape
        _UpperCamelCase = jax.image.resize(
            lowercase_ , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
        _UpperCamelCase = self.conv(lowercase_)
        return hidden_states
class _UpperCAmelCase ( nn.Module ):
    '''Flax 2x downsampling block: a stride-2 3x3 convolution.

    NOTE(review): the `__A = 42` / `__A = jnp.floataa` class attributes look
    like renaming damage (presumably `out_channels: int` and `dtype`); confirm.
    '''
    __A = 42
    __A = jnp.floataa

    def __UpperCAmelCase ( self : Dict) -> Optional[Any]:
        """Build the stride-2 convolution."""
        # NOTE(review): bound to `_UpperCamelCase` but read as `self.conv` in
        # __call__ — renaming damage; confirm.
        _UpperCamelCase = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

    def __call__( self : List[Any] , lowercase_ : Union[str, Any]) -> str:
        """Halve the spatial resolution of NHWC hidden states."""
        _UpperCamelCase = self.conv(lowercase_)
        return hidden_states
class _UpperCAmelCase ( nn.Module ):
    '''Flax ResNet block: two GroupNorm/swish/3x3-conv stages with a projected
    timestep embedding added between them, plus an optional 1x1 shortcut conv
    when the channel count changes.

    NOTE(review): the `__A` class attributes look like renaming damage
    (presumably in_channels, out_channels, dropout_prob, use_nin_shortcut,
    dtype); confirm against the original file.
    '''
    __A = 42
    __A = None
    __A = 0.0
    __A = None
    __A = jnp.floataa

    def __UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
        """Build norms, convs, time-embedding projection, dropout and shortcut.

        NOTE(review): every layer is bound to `_UpperCamelCase` but read back
        in __call__ as norma/conva/time_emb_proj/dropout/conv_shortcut —
        renaming damage; confirm intended attribute names.
        """
        _UpperCamelCase = self.in_channels if self.out_channels is None else self.out_channels

        _UpperCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5)
        _UpperCamelCase = nn.Conv(
            lowercase_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        _UpperCamelCase = nn.Dense(lowercase_ , dtype=self.dtype)

        _UpperCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5)
        _UpperCamelCase = nn.Dropout(self.dropout_prob)
        _UpperCamelCase = nn.Conv(
            lowercase_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        _UpperCamelCase = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        _UpperCamelCase = None
        if use_nin_shortcut:
            _UpperCamelCase = nn.Conv(
                lowercase_ , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )

    def __call__( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : str=True) -> List[Any]:
        """Apply the residual block to NHWC hidden states given a time embedding."""
        _UpperCamelCase = hidden_states
        _UpperCamelCase = self.norma(lowercase_)
        _UpperCamelCase = nn.swish(lowercase_)
        _UpperCamelCase = self.conva(lowercase_)
        # Broadcast the projected timestep embedding over the spatial dims.
        _UpperCamelCase = self.time_emb_proj(nn.swish(lowercase_))
        _UpperCamelCase = jnp.expand_dims(jnp.expand_dims(lowercase_ , 1) , 1)
        _UpperCamelCase = hidden_states + temb
        _UpperCamelCase = self.norma(lowercase_)
        _UpperCamelCase = nn.swish(lowercase_)
        _UpperCamelCase = self.dropout(lowercase_ , lowercase_)
        _UpperCamelCase = self.conva(lowercase_)
        if self.conv_shortcut is not None:
            _UpperCamelCase = self.conv_shortcut(lowercase_)
        return hidden_states + residual
| 82 | import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
# Number of examples read by the full-speed benchmarks.
lowerCamelCase__ = 5_0000
# Smaller example count used by the formatted-read benchmarks.
lowerCamelCase__ = 5000
# NOTE(review): the path pieces are bound to `lowerCamelCase__` but read back
# below as RESULTS_BASEPATH / RESULTS_FILENAME (and the JSON path is read when
# the results are written) — looks like renaming damage; confirm.
lowerCamelCase__,lowerCamelCase__ = os.path.split(__file__)
lowerCamelCase__ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def lowerCAmelCase__(dataset, length):
    """Benchmark: read `length` examples from `dataset` one item at a time.

    The `@get_duration` decorator times the call; read values are discarded.
    Fix: the original signature declared two parameters both named `a__`
    (a SyntaxError) while callers pass `(dataset, length=...)`.
    """
    for i in range(length):
        _ = dataset[i]
@get_duration
def lowerCAmelCase__(dataset, length, batch_size):
    """Benchmark: read `dataset` in contiguous slices of `batch_size`.

    Fix: the original signature declared three parameters all named `a__`
    (a SyntaxError); callers pass `(dataset, length=..., batch_size=...)`.
    `length` is accepted for signature parity with the other benchmarks.
    """
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]
@get_duration
def lowerCAmelCase__(dataset, length, type):
    """Benchmark: item reads with the dataset formatted as `type`.

    `type` shadows the builtin, but callers pass it by that keyword
    (e.g. {"type": "numpy", "length": ...}), so the name must stay.
    Fix: the original signature declared three parameters all named `a__`.
    """
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]
@get_duration
def lowerCAmelCase__(dataset, length, batch_size, type):
    """Benchmark: batched slice reads with the dataset formatted as `type`.

    Fix: the original signature declared four parameters all named `a__`
    (a SyntaxError); callers pass length/batch_size/type by keyword.
    """
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def lowerCAmelCase__( ) ->Dict:
    """Generate a synthetic arrow dataset, time every read benchmark on it
    (contiguous, then shuffled), and dump the timings to the JSON results file.

    NOTE(review): several names read below (functions, functions_shuffled,
    dataset, the results path) are only ever assigned as `_UpperCamelCase` /
    `a__` — renaming damage; confirm intended names.
    """
    _UpperCamelCase = {"num examples": SPEED_TEST_N_EXAMPLES}
    # (benchmark function, kwargs) pairs run against the fresh dataset.
    _UpperCamelCase = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    # Subset re-run after shuffling (pandas/torch/tensorflow formats skipped).
    _UpperCamelCase = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset" )
        _UpperCamelCase = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
        _UpperCamelCase = generate_example_dataset(
            os.path.join(a__ , "dataset.arrow" ) , a__ , num_examples=a__ , seq_shapes={"list": (100,)} , )
        print("first set of iterations" )
        for func, kwargs in functions:
            print(func.__name__ , str(a__ ) )
            _UpperCamelCase = func(a__ , **a__ )
        print("shuffling dataset" )
        _UpperCamelCase = dataset.shuffle()
        # NOTE(review): the message below is missing its closing parenthesis.
        print("Second set of iterations (after shuffling" )
        for func, kwargs in functions_shuffled:
            print("shuffled " , func.__name__ , str(a__ ) )
            _UpperCamelCase = func(
                a__ , **a__ )
    # Persist timings as UTF-8 JSON.
    with open(a__ , "wb" ) as f:
        f.write(json.dumps(a__ ).encode("utf-8" ) )


if __name__ == "__main__": # useful to run the profiler
    benchmark_iterating()
| 82 | 1 |
def lowerCAmelCase__(a__) -> str:
    """Return the two's-complement binary string of a non-positive integer.

    The result is "0b" followed by one sign bit and enough magnitude bits to
    represent `a__`, e.g. -5 -> "0b1011" and 0 -> "0b0".

    Raises:
        ValueError: if `a__` is positive.
    """
    if a__ > 0:
        raise ValueError("input must be a negative integer")
    if a__ == 0:
        return "0b0"
    # Width = bits needed for the magnitude, plus one leading sign bit.
    width = len(bin(a__)[3:]) + 1
    # Adding 2**width to a negative value yields its two's-complement encoding,
    # which always occupies exactly `width` binary digits.
    return "0b" + format((1 << width) + a__, "b")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 82 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
    '''Fast (CPU-sized) tests for the KarrasVe pipeline.'''

    @property
    def __UpperCAmelCase ( self : int) -> str:
        """A tiny seeded UNet2DModel used as the unconditional denoiser."""
        torch.manual_seed(0)
        _UpperCamelCase = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model

    def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
        """Output should match the reference slice, with and without return_dict.

        NOTE(review): many locals (pipe, image, image_from_tuple, image_slice,
        expected_slice, ...) are read back by their original names while only
        `_UpperCamelCase` is assigned — renaming damage; confirm.
        """
        _UpperCamelCase = self.dummy_uncond_unet
        _UpperCamelCase = KarrasVeScheduler()

        _UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
        pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)

        # Same seed for both calls so the two outputs must agree.
        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy").images

        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy" , return_dict=lowercase_)[0]

        _UpperCamelCase = image[0, -3:, -3:, -1]
        _UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        _UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    '''Slow integration test: KarrasVe sampling with the 256x256 CelebA-HQ NCSN++ model.'''

    def __UpperCAmelCase ( self : int) -> Tuple:
        """A 20-step sample should match the recorded reference slice.

        NOTE(review): locals (pipe, image, image_slice, expected_slice) are
        read back by their original names while only `_UpperCamelCase` is
        assigned — renaming damage; confirm.
        """
        _UpperCamelCase = "google/ncsnpp-celebahq-256"
        _UpperCamelCase = UNetaDModel.from_pretrained(lowercase_)
        _UpperCamelCase = KarrasVeScheduler()

        _UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
        pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)

        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = pipe(num_inference_steps=20 , generator=lowercase_ , output_type="numpy").images

        _UpperCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        _UpperCamelCase = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82 | 1 |
from __future__ import annotations
from math import pi, sqrt
def lowerCAmelCase__(inductance: float, capacitance: float) -> tuple:
    """Return the resonant frequency of an LC circuit.

    f = 1 / (2 * pi * sqrt(L * C))

    Args:
        inductance: inductance L in henries; must be > 0.
        capacitance: capacitance C in farads; must be > 0.

    Returns:
        A ("Resonant frequency", frequency_in_hertz) tuple.

    Raises:
        ValueError: if inductance or capacitance is zero or negative.
    """
    # Fix: the original signature declared two parameters both named `a__`
    # (a SyntaxError) while the body read inductance/capacitance.
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 82 | import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__A = MODEL_FOR_MASKED_LM_MAPPING
__A = TF_MODEL_FOR_MASKED_LM_MAPPING
    def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
        """Release as much GPU memory as possible between tests."""
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
@require_tf
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 25506, "token_str": " accuser"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-0_5,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-0_5,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
] , )
_UpperCamelCase = unmasker("My name is <mask> <mask>" , top_k=2)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
    @require_torch_gpu
    def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
        """Smoke-test fill-mask with an fp16 model on GPU.

        NOTE(review): the pipeline is bound to `_UpperCamelCase` but used as
        `pipe` below — renaming damage; confirm.
        """
        _UpperCamelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
        # convert model to fp16
        pipe.model.half()

        _UpperCamelCase = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(lowercase_ , lowercase_)
@slow
@require_torch
def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
self.run_large_test(lowercase_)
@slow
@require_tf
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
self.run_large_test(lowercase_)
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int) -> Any:
"""simple docstring"""
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
@require_tf
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int]) -> int:
        """Build a FillMaskPipeline plus example inputs for the shared pipeline tests.

        Skips when the tokenizer has no mask token (e.g. reformer, wav2vec2).
        NOTE(review): `tokenizer` / `fill_masker` / `examples` are read below
        while the parameters are all named `lowercase_` and assignments go to
        `_UpperCamelCase` — renaming damage; confirm.
        """
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        _UpperCamelCase = [
            f'This is another {tokenizer.mask_token} test',
        ]
        return fill_masker, examples
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> str:
"""simple docstring"""
_UpperCamelCase = fill_masker.tokenizer
_UpperCamelCase = fill_masker.model
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}'])
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
with self.assertRaises(lowercase_):
fill_masker([None])
# No mask_token is not supported
with self.assertRaises(lowercase_):
fill_masker("This is")
self.run_test_top_k(lowercase_ , lowercase_)
self.run_test_targets(lowercase_ , lowercase_)
self.run_test_top_k_targets(lowercase_ , lowercase_)
self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_)
self.fill_mask_with_multiple_masks(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : int , lowercase_ : Dict , lowercase_ : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = sorted(vocab.keys())[:2]
# Pipeline argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , targets=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Call argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Score equivalence
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCamelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_) == set(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
# Raises with invalid
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[])
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""])
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets="")
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , top_k=2)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
# top_k=2, ntargets=3
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=lowercase_)
# If we use the most probably targets, and filter differently, we should still
# have the same results
_UpperCamelCase = [el["token_str"] for el in sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_).issubset(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=lowercase_)
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
    def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[str]) -> Tuple:
        """Duplicated targets must be deduplicated: the pipeline cannot return
        more results than there are unique targets, even with a larger top_k."""
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        _UpperCamelCase = tokenizer.get_vocab()
        # String duplicates + id duplicates
        _UpperCamelCase = sorted(vocab.keys())[:3]
        _UpperCamelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        _UpperCamelCase = fill_masker(f'My name is {tokenizer.mask_token}' , targets=lowercase_ , top_k=10)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(lowercase_) , 3)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Any) -> Dict:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
| 82 | 1 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : str , lowercase_ : Tuple , lowercase_ : Dict=2 , lowercase_ : int=8 , lowercase_ : Optional[Any]=True , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Dict=99 , lowercase_ : str=16 , lowercase_ : Union[str, Any]=5 , lowercase_ : Optional[Any]=2 , lowercase_ : List[str]=36 , lowercase_ : List[str]="gelu" , lowercase_ : List[Any]=0.0 , lowercase_ : int=0.0 , lowercase_ : Optional[Any]=512 , lowercase_ : Optional[Any]=16 , lowercase_ : Dict=2 , lowercase_ : List[str]=0.02 , lowercase_ : Union[str, Any]=3 , lowercase_ : List[str]=4 , lowercase_ : str=None , ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
    def __UpperCAmelCase ( self : str) -> Optional[int]:
        """Create random input tensors (plus optional mask / token-type ids /
        labels, gated by the use_* flags) and a config for the model tests.

        NOTE(review): every tensor is bound to `_UpperCamelCase` while the
        return statement reads config/input_ids/... — renaming damage; confirm.
        """
        _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)

        _UpperCamelCase = None
        if self.use_input_mask:
            _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])

        _UpperCamelCase = None
        if self.use_token_type_ids:
            _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)

        _UpperCamelCase = None
        _UpperCamelCase = None
        _UpperCamelCase = None
        if self.use_labels:
            _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            _UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)

        _UpperCamelCase = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
        """Build an MraConfig from this tester's hyperparameters.

        NOTE(review): `is_decoder=lowercase_` reads a name that is not a
        parameter here — likely renaming damage (originally False); confirm.
        """
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
    def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]:
        """Variant config used for pipeline tests.

        NOTE(review): `300` is bound to `_UpperCamelCase`; presumably this was
        `config.vocab_size = 300` originally — renaming damage; confirm.
        """
        _UpperCamelCase = self.get_config()
        _UpperCamelCase = 300
        return config
def __UpperCAmelCase ( self : List[str]) -> int:
"""simple docstring"""
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = self.prepare_config_and_inputs()
_UpperCamelCase = True
_UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def __UpperCAmelCase ( self : Dict , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Dict) -> Any:
        """Instantiate MraModel in eval mode and check the last_hidden_state
        shape for three calling conventions (with/without mask and token types).

        NOTE(review): `model` / `result` are read back by their original names
        while only `_UpperCamelCase` is assigned — renaming damage; confirm.
        """
        _UpperCamelCase = MraModel(config=lowercase_)
        model.to(lowercase_)
        model.eval()
        _UpperCamelCase = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_)
        _UpperCamelCase = model(lowercase_ , token_type_ids=lowercase_)
        _UpperCamelCase = model(lowercase_)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCAmelCase ( self : int , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : str , lowercase_ : Tuple , ) -> List[Any]:
"""Check MraModel in decoder mode: forward with encoder hidden states and
(optionally) an encoder attention mask, asserting the output shape."""
_UpperCamelCase = True
_UpperCamelCase = MraModel(lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
_UpperCamelCase = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , encoder_hidden_states=lowercase_ , )
_UpperCamelCase = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCAmelCase ( self : int , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : Dict , lowercase_ : Optional[int]) -> Tuple:
"""Check MraForMaskedLM: logits must be (batch, seq_len, vocab_size)."""
_UpperCamelCase = MraForMaskedLM(config=lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : int) -> int:
"""Check MraForQuestionAnswering: start/end logits must be (batch, seq_len)."""
_UpperCamelCase = MraForQuestionAnswering(config=lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : int , lowercase_ : Any , lowercase_ : int) -> str:
"""Check MraForSequenceClassification: logits must be (batch, num_labels)."""
_UpperCamelCase = self.num_labels
_UpperCamelCase = MraForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __UpperCAmelCase ( self : Tuple , lowercase_ : int , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Dict) -> Any:
"""Check MraForTokenClassification: logits must be (batch, seq_len, num_labels)."""
_UpperCamelCase = self.num_labels
_UpperCamelCase = MraForTokenClassification(config=lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def __UpperCAmelCase ( self : str , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : int) -> List[str]:
"""Check MraForMultipleChoice: inputs are tiled to (batch, num_choices, seq);
logits must be (batch, num_choices)."""
_UpperCamelCase = self.num_choices
_UpperCamelCase = MraForMultipleChoice(config=lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""Prepare (config, inputs_dict) for the common model tests.

NOTE(review): as above, the unpacking targets were mangled to
`_UpperCamelCase`, so `input_ids`/`token_type_ids`/`input_mask` in the dict
below are unbound as written."""
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
'''Common model-tester suite for MRA.

NOTE(review): identifier mangling renamed every class attribute to `__A`
(originally distinct flags such as `all_model_classes`, `test_pruning`, ...)
and every method to `__UpperCAmelCase`, so only the last binding of each name
survives at runtime; read this class as documentation of intent only.'''
__A = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
__A = False
__A = False
__A = False
__A = False
__A = ()
def __UpperCAmelCase ( self : Any) -> Optional[Any]:
"""Create the model tester and a ConfigTester for MraConfig."""
_UpperCamelCase = MraModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=lowercase_ , hidden_size=37)
def __UpperCAmelCase ( self : Dict) -> Any:
"""Run the shared config sanity tests."""
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : str) -> Dict:
"""Base-model forward-shape test."""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def __UpperCAmelCase ( self : Dict) -> str:
"""Base-model test across the three position-embedding variants."""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCamelCase = type
self.model_tester.create_and_check_model(*lowercase_)
def __UpperCAmelCase ( self : List[Any]) -> str:
"""Masked-LM head test."""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_)
def __UpperCAmelCase ( self : int) -> Optional[int]:
"""Multiple-choice head test."""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase_)
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""Question-answering head test."""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_)
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""Sequence-classification head test."""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_)
def __UpperCAmelCase ( self : Optional[int]) -> str:
"""Token-classification head test."""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_)
@slow
def __UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""Smoke-test loading the first published MRA checkpoint."""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = MraModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
@unittest.skip(reason="MRA does not output attentions")
def __UpperCAmelCase ( self : Optional[Any]) -> List[Any]:
"""Attention-output test is skipped: MRA returns no attention tensors."""
return
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''Slow integration tests for MRA against published checkpoints: each test
runs a deterministic forward pass and compares a 3x3 logits corner slice to
hard-coded expected values (atol=1e-4).'''
@slow
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""Headless MraModel on mra-base-512-4: hidden states of shape (1, 256, 768)."""
_UpperCamelCase = MraModel.from_pretrained("uw-madison/mra-base-512-4")
_UpperCamelCase = torch.arange(256).unsqueeze(0)
with torch.no_grad():
_UpperCamelCase = model(lowercase_)[0]
_UpperCamelCase = torch.Size((1, 256, 768))
self.assertEqual(output.shape , lowercase_)
_UpperCamelCase = torch.tensor(
[[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4))
@slow
def __UpperCAmelCase ( self : Dict) -> Dict:
"""Masked-LM head on mra-base-512-4: logits of shape (1, 256, 50265)."""
_UpperCamelCase = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
_UpperCamelCase = torch.arange(256).unsqueeze(0)
with torch.no_grad():
_UpperCamelCase = model(lowercase_)[0]
_UpperCamelCase = 50265
_UpperCamelCase = torch.Size((1, 256, vocab_size))
self.assertEqual(output.shape , lowercase_)
_UpperCamelCase = torch.tensor(
[[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4))
@slow
def __UpperCAmelCase ( self : List[Any]) -> Tuple:
"""Masked-LM head on the long-context mra-base-4096-8-d3 checkpoint."""
_UpperCamelCase = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
_UpperCamelCase = torch.arange(4096).unsqueeze(0)
with torch.no_grad():
_UpperCamelCase = model(lowercase_)[0]
_UpperCamelCase = 50265
_UpperCamelCase = torch.Size((1, 4096, vocab_size))
self.assertEqual(output.shape , lowercase_)
_UpperCamelCase = torch.tensor(
[[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4))
| 82 | lowerCamelCase__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def lowerCAmelCase__ ( a__ ) ->int:
    """Evaluate a fully parenthesized infix expression with Dijkstra's
    two-stack algorithm.

    Args:
        a__: expression string such as "(5 + ((4 * 2) * (2 + 3)))".
            Operands must be single digits; "(" and whitespace are ignored.

    Returns:
        The value of the expression (a float when "/" is used).
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    # Plain lists act as the two stacks (push=append, pop=pop, peek=[-1]);
    # this also removes the dependency on the project-local Stack helper.
    operand_stack = []
    operator_stack = []
    for token in a__:
        if token.isdigit():
            # RULE 1: push operands.
            operand_stack.append(int(token))
        elif token in operators:
            # RULE 2: push operators.
            operator_stack.append(token)
        elif token == ")":
            # RULE 4: on ")", apply the top operator to the top two operands.
            # The first pop is the RIGHT operand (matters for "-" and "/").
            opr = operator_stack.pop()
            num_right = operand_stack.pop()
            num_left = operand_stack.pop()
            operand_stack.append(operators[opr](num_left, num_right))
        # RULE 3: "(" and spaces are simply skipped.
    # RULE 5: the single remaining operand is the result.
    return operand_stack[-1]
if __name__ == "__main__":
    # Demo: the mangled original assigned the expression to a throwaway name
    # and then printed undefined `equation` via a function name
    # (`dijkstras_two_stack_algorithm`) that does not exist in this module.
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {lowerCAmelCase__(equation)}")
| 82 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
'''Fast (tiny-model) tests for StableDiffusionUpscalePipeline.

NOTE(review): identifier mangling renamed every method/property to
`__UpperCAmelCase` and every local binding to `_UpperCamelCase`, so the
attribute reads below (`self.dummy_cond_unet_upscale`, `batch_size`,
`low_res_image`, ...) are unresolved as written; the annotations document the
original intent only.'''
def __UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""tearDown-style cleanup: free Python and CUDA memory after each test."""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCAmelCase ( self : Any) -> List[str]:
"""Random 1x3x32x32 image tensor fixture (seeded for determinism)."""
_UpperCamelCase = 1
_UpperCamelCase = 3
_UpperCamelCase = (32, 32)
_UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(lowercase_)
return image
@property
def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""Tiny class-conditioned UNet fixture for the upscale pipeline."""
torch.manual_seed(0)
_UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=lowercase_ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def __UpperCAmelCase ( self : List[str]) -> Any:
"""Tiny VAE fixture."""
torch.manual_seed(0)
_UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def __UpperCAmelCase ( self : Dict) -> Tuple:
"""Tiny CLIP text-encoder fixture."""
torch.manual_seed(0)
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
return CLIPTextModel(lowercase_)
def __UpperCAmelCase ( self : List[str]) -> Any:
"""End-to-end sanity check: image shape is 4x the low-res input and the
output corner slice matches hard-coded values (both dict and tuple APIs)."""
_UpperCamelCase = "cpu"  # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.dummy_cond_unet_upscale
_UpperCamelCase = DDPMScheduler()
_UpperCamelCase = DDIMScheduler(prediction_type="v_prediction")
_UpperCamelCase = self.dummy_vae
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
_UpperCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
_UpperCamelCase = Image.fromarray(np.uinta(lowercase_)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
_UpperCamelCase = StableDiffusionUpscalePipeline(
unet=lowercase_ , low_res_scheduler=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , max_noise_level=350 , )
_UpperCamelCase = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = "A painting of a squirrel eating a burger"
_UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(0)
_UpperCamelCase = sd_pipe(
[prompt] , image=lowercase_ , generator=lowercase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCamelCase = output.images
_UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(0)
_UpperCamelCase = sd_pipe(
[prompt] , image=lowercase_ , generator=lowercase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=lowercase_ , )[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
_UpperCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_UpperCamelCase = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCAmelCase ( self : Any) -> Any:
"""Batching checks: batched prompts/images and num_images_per_prompt both
yield two output images."""
_UpperCamelCase = "cpu"  # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.dummy_cond_unet_upscale
_UpperCamelCase = DDPMScheduler()
_UpperCamelCase = DDIMScheduler(prediction_type="v_prediction")
_UpperCamelCase = self.dummy_vae
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
_UpperCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
_UpperCamelCase = Image.fromarray(np.uinta(lowercase_)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
_UpperCamelCase = StableDiffusionUpscalePipeline(
unet=lowercase_ , low_res_scheduler=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , max_noise_level=350 , )
_UpperCamelCase = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = "A painting of a squirrel eating a burger"
_UpperCamelCase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCamelCase = output.images
assert image.shape[0] == 2
_UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(0)
_UpperCamelCase = sd_pipe(
[prompt] , image=lowercase_ , generator=lowercase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCamelCase = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU")
def __UpperCAmelCase ( self : Tuple) -> Dict:
"""fp16 smoke test on GPU (UNet/text-encoder halved; VAE kept fp32 to
avoid overflow); only the output shape is asserted."""
_UpperCamelCase = self.dummy_cond_unet_upscale
_UpperCamelCase = DDPMScheduler()
_UpperCamelCase = DDIMScheduler(prediction_type="v_prediction")
_UpperCamelCase = self.dummy_vae
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
_UpperCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
_UpperCamelCase = Image.fromarray(np.uinta(lowercase_)).convert("RGB").resize((64, 64))
# put models in fp16, except vae as it overflows in fp16
_UpperCamelCase = unet.half()
_UpperCamelCase = text_encoder.half()
# make sure here that pndm scheduler skips prk
_UpperCamelCase = StableDiffusionUpscalePipeline(
unet=lowercase_ , low_res_scheduler=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , max_noise_level=350 , )
_UpperCamelCase = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = "A painting of a squirrel eating a burger"
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = sd_pipe(
[prompt] , image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type="np" , ).images
_UpperCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''Slow GPU integration tests for StableDiffusionUpscalePipeline against
the published stabilityai/stable-diffusion-x4-upscaler checkpoint.

NOTE(review): same mangling caveats as the fast suite above — method names
and local bindings (`pipe`, `image`, `output`, ...) were rewritten and are
unresolved as written.'''
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""tearDown-style cleanup: free Python and CUDA memory after each test."""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : int) -> List[Any]:
"""fp32 upscale of a reference cat image; compared to a stored .npy output
with atol 1e-3."""
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png")
_UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy")
_UpperCamelCase = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCamelCase = StableDiffusionUpscalePipeline.from_pretrained(lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
_UpperCamelCase = "a cat sitting on a park bench"
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(
prompt=lowercase_ , image=lowercase_ , generator=lowercase_ , output_type="np" , )
_UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1e-3
def __UpperCAmelCase ( self : List[Any]) -> Tuple:
"""fp16 upscale of the same image; looser tolerance (5e-1) vs fp16 .npy."""
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png")
_UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy")
_UpperCamelCase = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCamelCase = StableDiffusionUpscalePipeline.from_pretrained(
lowercase_ , torch_dtype=torch.floataa , )
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
_UpperCamelCase = "a cat sitting on a park bench"
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(
prompt=lowercase_ , image=lowercase_ , generator=lowercase_ , output_type="np" , )
_UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5e-1
def __UpperCAmelCase ( self : Tuple) -> int:
"""Memory test: with attention slicing + sequential CPU offload, peak CUDA
allocation must stay under ~2.9 GB."""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png")
_UpperCamelCase = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCamelCase = StableDiffusionUpscalePipeline.from_pretrained(
lowercase_ , torch_dtype=torch.floataa , )
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = "a cat sitting on a park bench"
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(
prompt=lowercase_ , image=lowercase_ , generator=lowercase_ , num_inference_steps=5 , output_type="np" , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 82 | import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration for the BertAbs abstractive-summarization model: separate
    transformer hyperparameters for the encoder (``enc_*``) and decoder
    (``dec_*``).

    The mangled original declared every ``__init__`` parameter as
    ``lowercase_`` (a duplicate-argument SyntaxError) and discarded every
    attribute assignment; the canonical parameter names and ``self.``
    bindings are restored here. Defaults are unchanged.
    """

    __A = '''bertabs'''
    model_type = '''bertabs'''  # key used by the Auto* config machinery

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 82 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration for ViT-MSN (Vision Transformer trained with Masked
    Siamese Networks).

    The mangled original declared every ``__init__`` parameter as
    ``lowercase_`` (a duplicate-argument SyntaxError) and discarded every
    attribute assignment; the canonical parameter names and ``self.``
    bindings are restored here. Defaults are unchanged.
    """

    __A = '''vit_msn'''
    model_type = '''vit_msn'''  # key used by the Auto* config machinery

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 82 | from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
    # Download the og:image of a web page. The mangled original assigned every
    # value to the same throwaway name and then read undefined variables
    # (`url`, `soup`, `image_url`, `image_data`, `file_name`); real names
    # are restored here.
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 82 | 1 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
lowerCamelCase__ = re.compile(R'''^(?P<major>\d+)''' R'''\.(?P<minor>\d+)''' R'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class _UpperCAmelCase :
'''A semantic version ("x.y.z") value object with ordering and hashing.

NOTE(review): mangling collapsed all dataclass fields to `__A` (originally
distinct fields such as `version_str`, `major`, `minor`, `patch`), renamed
`__post_init__` and the `tuple` property to `__UpperCAmelCase`, and broke
references to the module helpers (`_str_to_version_tuple`, `Version`,
`_version_tuple_to_str`) — this class does not run as written.'''
__A = 42
__A = None
__A = None
__A = None
__A = None
def __UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""__post_init__-style hook: parse version_str into major/minor/patch.
NOTE(review): the unpacking targets were mangled, so nothing is bound."""
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = _str_to_version_tuple(self.version_str)
def __repr__( self : Union[str, Any]) -> int:
"""Render as "major.minor.patch" via the tuple property."""
return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
def __UpperCAmelCase ( self : str) -> Union[str, Any]:
"""The (major, minor, patch) tuple."""
return self.major, self.minor, self.patch
def __UpperCAmelCase ( self : Tuple , lowercase_ : Optional[Any]) -> Dict:
"""Coerce the comparison operand to a Version, or raise TypeError."""
if isinstance(lowercase_ , lowercase_):
return Version(lowercase_)
elif isinstance(lowercase_ , lowercase_):
return other
raise TypeError(f'{other} (type {type(lowercase_)}) cannot be compared to version.')
def __eq__( self : List[Any] , lowercase_ : int) -> int:
"""Equal iff the operand coerces to a Version with the same tuple;
non-coercible operands compare unequal rather than raising."""
try:
_UpperCamelCase = self._validate_operand(lowercase_)
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : Optional[Any] , lowercase_ : List[Any]) -> List[str]:
"""Tuple-wise ordering; total_ordering derives the other comparisons."""
_UpperCamelCase = self._validate_operand(lowercase_)
return self.tuple < other.tuple
def __hash__( self : Optional[int]) -> str:
"""Hash the canonical "x.y.z" string so equal versions hash equally."""
return hash(_version_tuple_to_str(self.tuple))
@classmethod
def __UpperCAmelCase ( cls : int , lowercase_ : Any) -> Any:
"""Alternate constructor: build from a dict, ignoring unknown keys."""
_UpperCamelCase = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dic.items() if k in field_names})
def __UpperCAmelCase ( self : List[str]) -> str:
"""Return the raw version string."""
return self.version_str
def lowerCAmelCase__ ( a__ ) ->int:
    """Parse a semantic-version string "x.y.z" into a (major, minor, patch)
    tuple of ints.

    Raises:
        ValueError: if `a__` is not exactly three dot-separated integers.
    """
    # The module-level compiled pattern was lost to identifier mangling
    # (`_VERSION_REG` is undefined here), so match the pattern inline;
    # `re.match` caches compiled patterns internally.
    res = re.match(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$", a__)
    if not res:
        raise ValueError(f'Invalid version \'{a__}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
    # The mangled original called int() on the whole input for every group.
    return tuple(int(res.group(g)) for g in ("major", "minor", "patch"))
def lowerCAmelCase__ ( a__ ) -> str:
    """Join a (major, minor, patch) tuple back into the "x.y.z" string.

    The mangled original iterated an undefined `version_tuple` and stringified
    the whole argument (`str(a__)`) for every element; both are fixed.
    """
    return ".".join(str(v) for v in a__)
| 82 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration for DPR models (context/question encoders and reader):
    BERT-base hyperparameters plus ``projection_dim``, the size of an optional
    projection on the pooled output (0 disables it).

    The mangled original declared every ``__init__`` parameter as
    ``lowercase_`` (a duplicate-argument SyntaxError) and discarded every
    attribute assignment; the canonical parameter names and ``self.``
    bindings are restored here. Defaults are unchanged.
    """

    __A = '''dpr'''
    model_type = '''dpr'''  # key used by the Auto* config machinery

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 82 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available

# Lazy-import table: maps each submodule to the public names it exposes.
# The mangled original rebound this dict, the torch-only list, and the final
# _LazyModule to a single throwaway name, so the lazy module was never built.
_import_structure = {
    '''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: leave the modeling symbols unregistered.
    pass
else:
    _import_structure['''modeling_ernie'''] = [
        '''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ErnieForCausalLM''',
        '''ErnieForMaskedLM''',
        '''ErnieForMultipleChoice''',
        '''ErnieForNextSentencePrediction''',
        '''ErnieForPreTraining''',
        '''ErnieForQuestionAnswering''',
        '''ErnieForSequenceClassification''',
        '''ErnieForTokenClassification''',
        '''ErnieModel''',
        '''ErniePreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import table: maps each submodule to the public names it exposes.
# The mangled original rebound this dict, the torch-only list, and the final
# _LazyModule to a single throwaway name, so the lazy module was never built.
_import_structure = {
    '''configuration_table_transformer''': [
        '''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TableTransformerConfig''',
        '''TableTransformerOnnxConfig''',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: leave the modeling symbols unregistered.
    pass
else:
    _import_structure['''modeling_table_transformer'''] = [
        '''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TableTransformerForObjectDetection''',
        '''TableTransformerModel''',
        '''TableTransformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | 1 |
from collections.abc import Callable
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Dict , lowercase_ : Callable | None = None) -> None:
"""simple docstring"""
_UpperCamelCase = []
# Stores indexes of each item for supporting updates and deletion.
_UpperCamelCase = {}
# Stores current size of heap.
_UpperCamelCase = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
_UpperCamelCase = key or (lambda lowercase_: x)
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : int) -> int | None:
"""simple docstring"""
return int((i - 1) / 2) if i > 0 else None
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : int) -> int | None:
"""simple docstring"""
_UpperCamelCase = int(2 * i + 1)
return left if 0 < left < self.size else None
def __UpperCAmelCase ( self : List[str] , lowercase_ : int) -> int | None:
"""simple docstring"""
_UpperCamelCase = int(2 * i + 2)
return right if 0 < right < self.size else None
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : int , lowercase_ : int) -> None:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_UpperCamelCase , _UpperCamelCase = self.arr[j], self.arr[i]
def __UpperCAmelCase ( self : List[Any] , lowercase_ : int , lowercase_ : int) -> bool:
"""simple docstring"""
return self.arr[i][1] < self.arr[j][1]
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int) -> int:
"""simple docstring"""
_UpperCamelCase = self._left(lowercase_)
_UpperCamelCase = self._right(lowercase_)
_UpperCamelCase = i
if left is not None and not self._cmp(lowercase_ , lowercase_):
_UpperCamelCase = left
if right is not None and not self._cmp(lowercase_ , lowercase_):
_UpperCamelCase = right
return valid_parent
def __UpperCAmelCase ( self : Tuple , lowercase_ : int) -> None:
"""simple docstring"""
_UpperCamelCase = self._parent(lowercase_)
while parent is not None and not self._cmp(lowercase_ , lowercase_):
self._swap(lowercase_ , lowercase_)
_UpperCamelCase , _UpperCamelCase = parent, self._parent(lowercase_)
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int) -> None:
"""simple docstring"""
_UpperCamelCase = self._get_valid_parent(lowercase_)
while valid_parent != index:
self._swap(lowercase_ , lowercase_)
_UpperCamelCase , _UpperCamelCase = valid_parent, self._get_valid_parent(lowercase_)
def __UpperCAmelCase ( self : List[str] , lowercase_ : int , lowercase_ : int) -> None:
"""simple docstring"""
if item not in self.pos_map:
return
_UpperCamelCase = self.pos_map[item]
_UpperCamelCase = [item, self.key(lowercase_)]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(lowercase_)
self._heapify_down(lowercase_)
def __UpperCAmelCase ( self : str , lowercase_ : int) -> None:
"""simple docstring"""
if item not in self.pos_map:
return
_UpperCamelCase = self.pos_map[item]
del self.pos_map[item]
_UpperCamelCase = self.arr[self.size - 1]
_UpperCamelCase = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(lowercase_)
self._heapify_down(lowercase_)
def __UpperCAmelCase ( self : int , lowercase_ : int , lowercase_ : int) -> None:
"""simple docstring"""
_UpperCamelCase = len(self.arr)
if arr_len == self.size:
self.arr.append([item, self.key(lowercase_)])
else:
_UpperCamelCase = [item, self.key(lowercase_)]
_UpperCamelCase = self.size
self.size += 1
self._heapify_up(self.size - 1)
def __UpperCAmelCase ( self : Optional[Any]) -> tuple | None:
"""simple docstring"""
return self.arr[0] if self.size else None
def __UpperCAmelCase ( self : List[Any]) -> tuple | None:
"""simple docstring"""
_UpperCamelCase = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0])
return top_item_tuple
def lowerCAmelCase__() -> None:
    """No-op entry point; this module is exercised through its doctests."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 82 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of output labels for each GLUE task, used to size the
# XLNetForSequenceClassification head during checkpoint conversion.
lowerCamelCase__ = {
    '''cola''': 2,
    '''mnli''': 3,
    '''mrpc''': 2,
    '''sst-2''': 2,
    '''sts-b''': 1,
    '''qqp''': 2,
    '''qnli''': 2,
    '''rte''': 2,
    '''wnli''': 2,
}
# BUG FIX: the conversion function below looks this table up under the name
# GLUE_TASKS_NUM_LABELS, which was never defined; provide it as an alias.
GLUE_TASKS_NUM_LABELS = lowerCamelCase__

logging.set_verbosity_info()
def lowerCAmelCase__(tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None) -> None:
    """Convert a TensorFlow XLNet checkpoint to a PyTorch model and save it.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        xlnet_config_file: JSON config file describing the architecture.
        pytorch_dump_folder_path: output folder for weights + config.
        finetuning_task: optional task name selecting the task head
            (GLUE task -> sequence classification, "squad" -> question
            answering, otherwise a plain LM head).
    """
    # BUG FIX: locals were previously all bound to a single throwaway name,
    # clobbering each other and leaving `config`/`model` unbound; parameters
    # also shared one (duplicate) name, which is a SyntaxError.
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}')
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'Save configuration file to {os.path.abspath(pytorch_config_dump_path)}')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


# Backward-compatible public name used by the __main__ guard below.
convert_xlnet_checkpoint_to_pytorch = lowerCAmelCase__
if __name__ == "__main__":
    # BUG FIX: the parser and parsed args were previously assigned to
    # `lowerCamelCase__`, leaving the `parser`/`args` names read below unbound.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--xlnet_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained XLNet model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
    )
    parser.add_argument(
        '''--finetuning_task''',
        default=None,
        type=str,
        help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
    )
    args = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 82 | 1 |
import warnings

# Deprecation shim: this module's contents were moved; importing from here
# still works but emits a FutureWarning pointing at the new import path.
warnings.warn(
    '''memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: '''
    '''`from accelerate import find_executable_batch_size` to avoid this warning.''',
    FutureWarning,
)
| 82 | import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCAmelCase(pl.LightningModule):
    """Minimal LightningModule wrapper used only to load a fine-tuned
    question-answering checkpoint (a Longformer encoder plus a 2-label
    ``qa_outputs`` head) so its weights can be transferred."""

    def __init__(self, model):
        super().__init__()
        # BUG FIX: these were previously bound to throwaway locals, so the
        # attributes read elsewhere in this script (self.model,
        # self.num_labels, self.qa_outputs) were never set.
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        # Intentionally empty: the wrapper exists only for checkpoint loading.
        pass


# Name used by the conversion function below.
LightningModel = _UpperCAmelCase
def lowerCAmelCase__(longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path) -> None:
    """Transfer weights from a PyTorch-Lightning QA checkpoint into a
    standalone LongformerForQuestionAnswering model and save it.

    Args:
        longformer_model: hub identifier of the base Longformer encoder.
        longformer_question_answering_ckpt_path: path to the Lightning ckpt.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    # BUG FIX: parameters previously shared one duplicate name (SyntaxError)
    # and every local was bound to the same throwaway name.
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}')


# Backward-compatible public name used by the __main__ guard below.
convert_longformer_qa_checkpoint_to_pytorch = lowerCAmelCase__
if __name__ == "__main__":
    # BUG FIX: parser/args were previously bound to `lowerCamelCase__`,
    # leaving the `parser`/`args` names read below unbound.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--longformer_model''',
        default=None,
        type=str,
        required=True,
        help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
    )
    parser.add_argument(
        '''--longformer_question_answering_ckpt_path''',
        default=None,
        type=str,
        required=True,
        help='''Path the official PyTorch Lightning Checkpoint.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 82 | 1 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = ['''image_processor''']
__A = '''SamImageProcessor'''
def __init__( self : Union[str, Any] , lowercase_ : Tuple) -> Any:
"""simple docstring"""
super().__init__(lowercase_)
_UpperCamelCase = self.image_processor
_UpperCamelCase = -10
_UpperCamelCase = self.image_processor.size["longest_edge"]
def __call__( self : List[Any] , lowercase_ : List[Any]=None , lowercase_ : Dict=None , lowercase_ : Optional[Any]=None , lowercase_ : Any=None , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : Optional[int] , ) -> BatchEncoding:
"""simple docstring"""
_UpperCamelCase = self.image_processor(
lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# pop arguments that are not used in the foward but used nevertheless
_UpperCamelCase = encoding_image_processor["original_sizes"]
if hasattr(lowercase_ , "numpy"): # Checks if Torch or TF tensor
_UpperCamelCase = original_sizes.numpy()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._check_and_preprocess_points(
input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , )
_UpperCamelCase = self._normalize_and_convert(
lowercase_ , lowercase_ , input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , return_tensors=lowercase_ , )
return encoding_image_processor
def __UpperCAmelCase ( self : str , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : Union[str, Any]=None , lowercase_ : Tuple=None , lowercase_ : int=None , lowercase_ : List[Any]="pt" , ) -> List[str]:
"""simple docstring"""
if input_points is not None:
if len(lowercase_) != len(lowercase_):
_UpperCamelCase = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0]) for point in input_points
]
else:
_UpperCamelCase = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_)
for point, original_size in zip(lowercase_ , lowercase_)
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points):
if input_labels is not None:
_UpperCamelCase , _UpperCamelCase = self._pad_points_and_labels(lowercase_ , lowercase_)
_UpperCamelCase = np.array(lowercase_)
if input_labels is not None:
_UpperCamelCase = np.array(lowercase_)
if input_boxes is not None:
if len(lowercase_) != len(lowercase_):
_UpperCamelCase = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] , is_bounding_box=lowercase_)
for box in input_boxes
]
else:
_UpperCamelCase = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ , is_bounding_box=lowercase_)
for box, original_size in zip(lowercase_ , lowercase_)
]
_UpperCamelCase = np.array(lowercase_)
if input_boxes is not None:
if return_tensors == "pt":
_UpperCamelCase = torch.from_numpy(lowercase_)
# boxes batch size of 1 by default
_UpperCamelCase = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
elif return_tensors == "tf":
_UpperCamelCase = tf.convert_to_tensor(lowercase_)
# boxes batch size of 1 by default
_UpperCamelCase = tf.expand_dims(lowercase_ , 1) if len(input_boxes.shape) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes})
if input_points is not None:
if return_tensors == "pt":
_UpperCamelCase = torch.from_numpy(lowercase_)
# point batch size of 1 by default
_UpperCamelCase = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
elif return_tensors == "tf":
_UpperCamelCase = tf.convert_to_tensor(lowercase_)
# point batch size of 1 by default
_UpperCamelCase = tf.expand_dims(lowercase_ , 1) if len(input_points.shape) != 4 else input_points
encoding_image_processor.update({"input_points": input_points})
if input_labels is not None:
if return_tensors == "pt":
_UpperCamelCase = torch.from_numpy(lowercase_)
# point batch size of 1 by default
_UpperCamelCase = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
elif return_tensors == "tf":
_UpperCamelCase = tf.convert_to_tensor(lowercase_)
# point batch size of 1 by default
_UpperCamelCase = tf.expand_dims(lowercase_ , 1) if len(input_labels.shape) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels})
return encoding_image_processor
def __UpperCAmelCase ( self : Tuple , lowercase_ : int , lowercase_ : str) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = max([point.shape[0] for point in input_points])
_UpperCamelCase = []
for i, point in enumerate(lowercase_):
if point.shape[0] != expected_nb_points:
_UpperCamelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value] , axis=0)
_UpperCamelCase = np.append(input_labels[i] , [self.point_pad_value])
processed_input_points.append(lowercase_)
_UpperCamelCase = processed_input_points
return input_points, input_labels
def __UpperCAmelCase ( self : Tuple , lowercase_ : int , lowercase_ : np.ndarray , lowercase_ : str , lowercase_ : str=False) -> np.ndarray:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = original_size
_UpperCamelCase , _UpperCamelCase = self.image_processor._get_preprocess_shape(lowercase_ , longest_edge=lowercase_)
_UpperCamelCase = deepcopy(lowercase_).astype(lowercase_)
if is_bounding_box:
_UpperCamelCase = coords.reshape(-1 , 2 , 2)
_UpperCamelCase = coords[..., 0] * (new_w / old_w)
_UpperCamelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_UpperCamelCase = coords.reshape(-1 , 4)
return coords
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[str]=None , lowercase_ : Optional[int]=None , lowercase_ : Union[str, Any]=None , ) -> Optional[Any]:
"""simple docstring"""
if input_points is not None:
if hasattr(lowercase_ , "numpy"): # Checks for TF or Torch tensor
_UpperCamelCase = input_points.numpy().tolist()
if not isinstance(lowercase_ , lowercase_) or not isinstance(input_points[0] , lowercase_):
raise ValueError("Input points must be a list of list of floating points.")
_UpperCamelCase = [np.array(lowercase_) for input_point in input_points]
else:
_UpperCamelCase = None
if input_labels is not None:
if hasattr(lowercase_ , "numpy"):
_UpperCamelCase = input_labels.numpy().tolist()
if not isinstance(lowercase_ , lowercase_) or not isinstance(input_labels[0] , lowercase_):
raise ValueError("Input labels must be a list of list integers.")
_UpperCamelCase = [np.array(lowercase_) for label in input_labels]
else:
_UpperCamelCase = None
if input_boxes is not None:
if hasattr(lowercase_ , "numpy"):
_UpperCamelCase = input_boxes.numpy().tolist()
if (
not isinstance(lowercase_ , lowercase_)
or not isinstance(input_boxes[0] , lowercase_)
or not isinstance(input_boxes[0][0] , lowercase_)
):
raise ValueError("Input boxes must be a list of list of list of floating points.")
_UpperCamelCase = [np.array(lowercase_).astype(np.floataa) for box in input_boxes]
else:
_UpperCamelCase = None
return input_points, input_labels, input_boxes
@property
def __UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(lowercase_))
def __UpperCAmelCase ( self : Dict , *lowercase_ : Optional[int] , **lowercase_ : Any) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor.post_process_masks(*lowercase_ , **lowercase_)
| 82 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase(lowerCAmelCase):
    """Deprecated feature-extractor shim: kept so old imports keep working,
    but warns and defers entirely to the replacement image processor."""

    def __init__(self, *args, **kwargs) -> None:
        # BUG FIX: the varargs/kwargs previously shared one duplicate name
        # (a SyntaxError), and that undefined name was passed as the warning
        # category; FutureWarning matches the deprecation shims elsewhere.
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 82 | 1 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
lowerCamelCase__ = True
from torch.cuda.amp import autocast
lowerCamelCase__ = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
__A = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__A = field(
default=lowerCAmelCase, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
__A = field(
default=lowerCAmelCase, metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
__A = field(
default=lowerCAmelCase, metadata={'''help''': '''Whether to log verbose messages or not.'''}, )
__A = field(
default=2.0, metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
__A = field(
default=0.5, metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
__A = field(
default=0.999_995, metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def lowerCAmelCase__(model_args, training_args) -> None:
    """Configure logging for the pretraining script.

    Verbosity: DEBUG when --verbose_logging is set, INFO on the main
    distributed process, WARNING otherwise.
    """
    # BUG FIX: parameters previously shared one duplicate name (SyntaxError);
    # names restored from the attribute reads below.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)


# Name used by main() below when wiring up the script.
configure_logger = lowerCAmelCase__
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
__A = field(
default=lowerCAmelCase, metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
__A = field(
default=lowerCAmelCase, metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__A = field(
default='''train''', metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
}, )
__A = field(
default='''validation''', metadata={
'''help''': (
'''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
}, )
__A = field(
default='''file''', metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''}, )
__A = field(
default=lowerCAmelCase, metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
__A = field(
default=1, metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
}, )
__A = field(
default=lowerCAmelCase, metadata={'''help''': '''The number of processes to use for the preprocessing.'''}, )
__A = field(
default=20.0, metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} )
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
__A = 42
__A = 42
__A = "longest"
__A = None
__A = None
def __call__( self : Tuple , lowercase_ : List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCamelCase = self.feature_extractor.pad(
lowercase_ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
_UpperCamelCase = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
_UpperCamelCase = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
_UpperCamelCase = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
torch.long)
_UpperCamelCase = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device)
# these two operations makes sure that all values
# before the output lengths indices are attended to
_UpperCamelCase = 1
_UpperCamelCase = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
# sample randomly masked indices
_UpperCamelCase = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=lowercase_ , min_masks=2 , )
return batch
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , *lowercase_ : Dict , lowercase_ : Optional[int]=1 , lowercase_ : Union[str, Any]=0 , lowercase_ : Tuple=1.0 , **lowercase_ : Tuple) -> Tuple:
"""simple docstring"""
super().__init__(*lowercase_ , **lowercase_)
_UpperCamelCase = 0
_UpperCamelCase = max_gumbel_temp
_UpperCamelCase = min_gumbel_temp
_UpperCamelCase = gumbel_temp_decay
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : nn.Module , lowercase_ : Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""simple docstring"""
model.train()
_UpperCamelCase = self._prepare_inputs(lowercase_)
if self.use_amp:
with autocast():
_UpperCamelCase = self.compute_loss(lowercase_ , lowercase_)
else:
_UpperCamelCase = self.compute_loss(lowercase_ , lowercase_)
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
_UpperCamelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_UpperCamelCase = loss.sum() / (inputs["mask_time_indices"]).sum()
else:
raise ValueError(f'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']')
if self.args.gradient_accumulation_steps > 1:
_UpperCamelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowercase_).backward()
elif self.use_apex:
with amp.scale_loss(lowercase_ , self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowercase_)
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp))
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp))
return loss.detach()
def lowerCAmelCase__ ( ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
configure_logger(a__ , a__ )
# Downloading and loading a dataset from the hub.
_UpperCamelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
_UpperCamelCase = DatasetDict()
_UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' , cache_dir=model_args.cache_dir , )
_UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
_UpperCamelCase = DatasetDict()
_UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
_UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
_UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=a__ )
def prepare_dataset(a__ ):
# check that all files have the correct sampling rate
_UpperCamelCase , _UpperCamelCase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
_UpperCamelCase = datasets.map(
a__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
# filter audio files that are too long
_UpperCamelCase = vectorized_datasets.filter(
lambda a__ : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(a__ ):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
_UpperCamelCase = vectorized_datasets.map(
a__ , batched=a__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
_UpperCamelCase = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'" )
_UpperCamelCase = WavaVecaForPreTraining(a__ )
_UpperCamelCase = DataCollatorForWavaVecaPretraining(model=a__ , feature_extractor=a__ )
_UpperCamelCase = WavaVecaPreTrainer(
model=a__ , data_collator=a__ , args=a__ , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=a__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 82 | import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( enum.Enum ):
'''simple docstring'''
__A = 0
__A = 1
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''generated'''
def __init__( self : Any , *lowercase_ : Dict , **lowercase_ : Tuple) -> List[Any]:
"""simple docstring"""
super().__init__(*lowercase_ , **lowercase_)
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=None , lowercase_ : Any=None , lowercase_ : Union[str, Any]=None , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = {}
if truncation is not None:
_UpperCamelCase = truncation
_UpperCamelCase = generate_kwargs
_UpperCamelCase = {}
if return_tensors is not None and return_type is None:
_UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCamelCase = self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
if len(lowercase_) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim.")
_UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __UpperCAmelCase ( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> Any:
"""simple docstring"""
return True
def __UpperCAmelCase ( self : Dict , *lowercase_ : List[str] , lowercase_ : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , lowercase_):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
_UpperCamelCase = ([prefix + arg for arg in args[0]],)
_UpperCamelCase = True
elif isinstance(args[0] , lowercase_):
_UpperCamelCase = (prefix + args[0],)
_UpperCamelCase = False
else:
raise ValueError(
f' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`')
_UpperCamelCase = self.tokenizer(*lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors=self.framework)
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : List[Any] , *lowercase_ : Any , **lowercase_ : int) -> Dict:
"""simple docstring"""
_UpperCamelCase = super().__call__(*lowercase_ , **lowercase_)
if (
isinstance(args[0] , lowercase_)
and all(isinstance(lowercase_ , lowercase_) for el in args[0])
and all(len(lowercase_) == 1 for res in result)
):
return [res[0] for res in result]
return result
def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str=TruncationStrategy.DO_NOT_TRUNCATE , **lowercase_ : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self._parse_and_tokenize(lowercase_ , truncation=lowercase_ , **lowercase_)
return inputs
def __UpperCAmelCase ( self : str , lowercase_ : str , **lowercase_ : str) -> str:
"""simple docstring"""
if self.framework == "pt":
_UpperCamelCase , _UpperCamelCase = model_inputs["input_ids"].shape
elif self.framework == "tf":
_UpperCamelCase , _UpperCamelCase = tf.shape(model_inputs["input_ids"]).numpy()
_UpperCamelCase = generate_kwargs.get("min_length" , self.model.config.min_length)
_UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
self.check_inputs(lowercase_ , generate_kwargs["min_length"] , generate_kwargs["max_length"])
_UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
_UpperCamelCase = output_ids.shape[0]
if self.framework == "pt":
_UpperCamelCase = output_ids.reshape(lowercase_ , out_b // in_b , *output_ids.shape[1:])
elif self.framework == "tf":
_UpperCamelCase = tf.reshape(lowercase_ , (in_b, out_b // in_b, *output_ids.shape[1:]))
return {"output_ids": output_ids}
def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : int=ReturnType.TEXT , lowercase_ : int=False) -> Tuple:
"""simple docstring"""
_UpperCamelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_UpperCamelCase = {f'{self.return_name}_token_ids': output_ids}
elif return_type == ReturnType.TEXT:
_UpperCamelCase = {
f'{self.return_name}_text': self.tokenizer.decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
}
records.append(lowercase_)
return records
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Summarization pipeline: generates a shorter text from the input text.

    Output records are keyed by ``summary_text`` / ``summary_token_ids``.
    """

    # Prefix used for the keys of the returned records.
    __A = '''summary'''

    def __call__( self : Optional[Any] , *args : int , **kwargs : Dict) -> Optional[int]:
        """Summarize the text(s) given as inputs.

        Bug fix: the original signature reused ``lowercase_`` for both
        ``*args`` and ``**kwargs`` (SyntaxError: duplicate argument name).
        """
        return super().__call__(*args , **kwargs)

    def __UpperCAmelCase ( self : List[str] , input_length : int , min_length : int , max_length : int) -> bool:
        """Warn about suspicious length configurations for summarization.

        Bug fixes: the original declared all three parameters as
        ``lowercase_`` (SyntaxError: duplicate argument name) and, despite the
        ``-> bool`` annotation, returned nothing; it now returns True like the
        sibling translation pipeline's check.
        """
        if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be inferior than your max_length={max_length}.')
        if input_length < max_length:
            logger.warning(
                f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})')
        return True
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Translation pipeline: translates input text between languages.

    Output records are keyed by ``translation_text`` /
    ``translation_token_ids``. Source/target languages can be given as
    ``src_lang``/``tgt_lang`` or parsed from a ``translation_xx_to_yy`` task
    name.
    """

    # Prefix used for the keys of the returned records.
    __A = '''translation'''

    def __UpperCAmelCase ( self : Dict , input_length : int , min_length : int , max_length : int) -> int:
        """Warn when the input looks too long relative to ``max_length``.

        Bug fix: the original declared all three parameters as ``lowercase_``
        (SyntaxError: duplicate argument name).
        """
        if input_length > 0.9 * max_length:
            logger.warning(
                f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
                "increasing your max_length manually, e.g. translator('...', max_length=400)")
        return True

    def __UpperCAmelCase ( self : Tuple , *args : Any , truncation : List[Any] = TruncationStrategy.DO_NOT_TRUNCATE , src_lang : Any = None , tgt_lang : Optional[Any] = None) -> List[str]:
        """Tokenize, preferring the tokenizer's translation-specific builder.

        Bug fixes: the original reused ``lowercase_`` for ``*args`` and all
        keyword parameters (SyntaxError: duplicate argument name), and passed
        that ambiguous name as the ``getattr`` default; ``None`` is the
        intended "hook absent" default.
        """
        if getattr(self.tokenizer , "_build_translation_inputs" , None):
            # Tokenizer knows how to build translation inputs (e.g. mBART/M2M).
            return self.tokenizer._build_translation_inputs(
                *args , return_tensors=self.framework , truncation=truncation , src_lang=src_lang , tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args , truncation=truncation)

    def __UpperCAmelCase ( self : List[str] , src_lang : Dict = None , tgt_lang : str = None , **kwargs : List[Any]) -> List[Any]:
        """Split kwargs into preprocess/forward/postprocess parameter dicts.

        Bug fixes: the original declared duplicate ``lowercase_`` parameters
        (SyntaxError) and assigned ``src_lang``/``tgt_lang`` and the values
        parsed from the task name into a throwaway local instead of storing
        them in ``preprocess_params``, so they were silently dropped.
        """
        preprocess_params , forward_params , postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task" , self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__( self : List[str] , *args : List[str] , **kwargs : str) -> Union[str, Any]:
        """Translate the text(s) given as inputs.

        Bug fix: the original signature reused ``lowercase_`` for both
        ``*args`` and ``**kwargs`` (SyntaxError: duplicate argument name).
        """
        return super().__call__(*args , **kwargs)
| 82 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.