import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
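# --- Editor's note: illustrative usage sketch, not part of the test file above. ---
# A minimal direct call of the zero-shot audio classification pipeline, reusing the
# public checkpoint and dataset exercised by the slow test:
from datasets import load_dataset
from transformers import pipeline

classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
audio = load_dataset("ashraq/esc50")["train"]["audio"][-1]["array"]
print(classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]))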
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
lowerCamelCase_ : Optional[int] = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_CITATION = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
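# --- Editor's note: illustrative cross-check, not part of the metric file above. ---
# The metric wraps `scipy.stats.pearsonr` directly, so the docstring example can be
# reproduced without the `datasets` machinery:
from scipy.stats import pearsonr

r, p = pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(r, 2), round(p, 2))  # -0.74 0.15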
"""simple docstring"""
_A = 8.3_144_598
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
if temperature < 0:
raise Exception('Temperature cannot be less than 0 K' )
if molar_mass <= 0:
raise Exception('Molar mass cannot be less than or equal to 0 kg/mol' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_A = 3_0_0
_A = 2_8
_A = rms_speed_of_molecule(temperature, molar_mass)
print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
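# --- Editor's note: worked check of the formula, not part of the file above. ---
# v_rms = sqrt(3RT/M) expects the molar mass M in kg/mol; for nitrogen gas
# M = 0.028 kg/mol, which gives the familiar textbook value at 300 K:
print((3 * 8.3144598 * 300 / 0.028) ** 0.5)  # ~517 m/s (the demo above passes 28, i.e. g/mol)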
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_A = re.compile(r'^(?P<major>\d+)' r'\.(?P<minor>\d+)' r'\.(?P<patch>\d+)$')
@total_ordering
@dataclass
class _lowercase :
lowercase_ = 42
lowercase_ = None
lowercase_ = None
lowercase_ = None
lowercase_ = None
def _UpperCamelCase ( self ) -> List[str]:
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = _str_to_version_tuple(self.version_str )
def __repr__( self ) -> int:
return F"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"""
@property
def _UpperCamelCase ( self ) -> Dict:
return self.major, self.minor, self.patch
def _UpperCamelCase ( self , UpperCAmelCase_ ) -> Optional[int]:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return Version(UpperCAmelCase_ )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return other
raise TypeError(F"""{other} (type {type(UpperCAmelCase_ )}) cannot be compared to version.""" )
def __eq__( self , UpperCAmelCase_ ) -> Optional[Any]:
try:
lowerCamelCase : List[str] = self._validate_operand(UpperCAmelCase_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , UpperCAmelCase_ ) -> Optional[int]:
lowerCamelCase : Optional[int] = self._validate_operand(UpperCAmelCase_ )
return self.tuple < other.tuple
def __hash__( self ) -> Optional[Any]:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def _UpperCamelCase ( cls , UpperCAmelCase_ ) -> Union[str, Any]:
lowerCamelCase : str = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def _UpperCamelCase ( self ) -> str:
return self.version_str
def UpperCAmelCase ( a_ ):
'''simple docstring'''
lowerCamelCase : Tuple = _VERSION_REG.match(a_ )
if not res:
raise ValueError(F"""Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.""" )
return tuple(int(a_ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return ".".join(str(a_ ) for v in version_tuple )
| 205 | 0 |
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number N that have
    the greatest product and return that product."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
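# --- Editor's note: quick sanity check, not part of the file above. ---
# The expected answer to Project Euler problem 8 (13 adjacent digits) is:
assert solution() == 23514624000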
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
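# --- Editor's note: illustrative usage sketch, not part of the test file above. ---
# The pattern the tests exercise: run `generate` in a background thread and consume
# text chunks from the `TextIteratorStreamer` as they arrive.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tok(["Hello"], return_tensors="pt")
streamer = TextIteratorStreamer(tok)
Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
for chunk in streamer:
    print(chunk, end="")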
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionUpscalePipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_ddpm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
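# --- Editor's note: illustrative usage sketch, not part of the test file above. ---
# Running the ONNX x4 upscaler on CPU, mirroring the fast tests; `low_res_image`
# is a placeholder for a PIL image (e.g. 128x128) you supply yourself.
from diffusers import OnnxStableDiffusionUpscalePipeline

pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
    "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
)
upscaled = pipe(prompt="A fantasy landscape, trending on artstation", image=low_res_image).images[0]
# output resolution is 4x the input, e.g. 128x128 -> 512x512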
import re


def dna(dna: str) -> str:
    """
    Returns the complementary strand of a DNA sequence.

    >>> dna("GCTA")
    'CGAT'
    >>> dna("GTCATGCA")
    'CAGTACGT'
    >>> dna("GFJK")
    Traceback (most recent call last):
        ...
    ValueError: Invalid Strand
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class _A ( lowerCAmelCase ):
snake_case__ : Dict = 'perceiver'
def __init__( self , __lowerCAmelCase=256 , __lowerCAmelCase=1280 , __lowerCAmelCase=768 , __lowerCAmelCase=1 , __lowerCAmelCase=26 , __lowerCAmelCase=8 , __lowerCAmelCase=8 , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="kv" , __lowerCAmelCase=1 , __lowerCAmelCase=1 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=True , __lowerCAmelCase=262 , __lowerCAmelCase=2048 , __lowerCAmelCase=56 , __lowerCAmelCase=[368, 496] , __lowerCAmelCase=16 , __lowerCAmelCase=1920 , __lowerCAmelCase=16 , __lowerCAmelCase=[1, 16, 224, 224] , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
lowercase = num_latents
lowercase = d_latents
lowercase = d_model
lowercase = num_blocks
lowercase = num_self_attends_per_block
lowercase = num_self_attention_heads
lowercase = num_cross_attention_heads
lowercase = qk_channels
lowercase = v_channels
lowercase = cross_attention_shape_for_attention
lowercase = self_attention_widening_factor
lowercase = cross_attention_widening_factor
lowercase = hidden_act
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = use_query_residual
# masked language modeling attributes
lowercase = vocab_size
lowercase = max_position_embeddings
# image classification attributes
lowercase = image_size
# flow attributes
lowercase = train_size
# multimodal autoencoding attributes
lowercase = num_frames
lowercase = audio_samples_per_frame
lowercase = samples_per_patch
lowercase = output_shape
class _A ( lowerCAmelCase ):
@property
def A__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowercase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
@property
def A__ ( self ):
"""simple docstring"""
return 1E-4
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = 3 , __lowerCAmelCase = 40 , __lowerCAmelCase = 40 , ):
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase = preprocessor.num_special_tokens_to_add(__lowerCAmelCase )
lowercase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
lowercase = [""" """.join(["""a"""] ) * seq_length] * batch_size
lowercase = dict(preprocessor(__lowerCAmelCase , return_tensors=__lowerCAmelCase ) )
lowercase = inputs.pop("""input_ids""" )
return inputs
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase = compute_effective_axis_dimension(__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch )
lowercase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase = dict(preprocessor(images=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) )
lowercase = inputs.pop("""pixel_values""" )
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
# NOTE: the class names below were dropped by the obfuscated dump; they are
# reconstructed from diffusers' dummy module for the torch+transformers+onnx backends.


class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
a = logging.get_logger(__name__)
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , *_UpperCAmelCase : str , **_UpperCAmelCase : Any ):
warnings.warn(
'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use VideoMAEImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
"""simple docstring"""
def _snake_case ( _snake_case : str ) -> str:
'''simple docstring'''
_A = ''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def _snake_case ( _snake_case : str ) -> dict[str, str]:
'''simple docstring'''
_A = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
_A = remove_duplicates(key.upper() )
_A = len(_snake_case )
# First fill cipher with key characters
_A = {alphabet[i]: char for i, char in enumerate(_snake_case )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(_snake_case ) , 26 ):
_A = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
_A = alphabet[i - offset]
_A = char
return cipher_alphabet
def _snake_case ( _snake_case : str , _snake_case : dict[str, str] ) -> str:
'''simple docstring'''
return "".join(cipher_map.get(_snake_case , _snake_case ) for ch in message.upper() )
def _snake_case ( _snake_case : str , _snake_case : dict[str, str] ) -> str:
'''simple docstring'''
_A = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(_snake_case , _snake_case ) for ch in message.upper() )
def _snake_case ( ) -> None:
'''simple docstring'''
_A = input('Enter message to encode or decode: ' ).strip()
_A = input('Enter keyword: ' ).strip()
_A = input('Encipher or decipher? E/D:' ).strip()[0].lower()
try:
_A = {'e': encipher, 'd': decipher}[option]
except KeyError:
raise KeyError('invalid input option' )
_A = create_cipher_map(_snake_case )
print(func(_snake_case , _snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
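# --- Editor's note: illustrative round-trip, not part of the file above. ---
# The cipher map is a bijection on letters (non-letters pass through), so
# deciphering an enciphered message recovers the upper-cased original:
cipher_map = create_cipher_map("Marvin")
ct = encipher("Hello World!!", cipher_map)
assert decipher(ct, cipher_map) == "HELLO WORLD!!"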
"""simple docstring"""
# Imports
import numpy as np
class A_ :
'''simple docstring'''
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ):
"""simple docstring"""
self.set_matricies(red=lowercase_ , green=lowercase_ , blue=lowercase_ , red_edge=lowercase_ , nir=lowercase_ )
def UpperCamelCase__ ( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ):
"""simple docstring"""
if red is not None:
UpperCAmelCase_ : Union[str, Any] = red
if green is not None:
UpperCAmelCase_ : Dict = green
if blue is not None:
UpperCAmelCase_ : Optional[int] = blue
if red_edge is not None:
UpperCAmelCase_ : Optional[int] = red_edge
if nir is not None:
UpperCAmelCase_ : str = nir
return True
def UpperCamelCase__ ( self , lowercase_="" , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ):
"""simple docstring"""
self.set_matricies(red=lowercase_ , green=lowercase_ , blue=lowercase_ , red_edge=lowercase_ , nir=lowercase_ )
UpperCAmelCase_ : str = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
def UpperCamelCase__ ( self ):
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCamelCase__ ( self ):
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCamelCase__ ( self , lowercase_=0.08 , lowercase_=1.22 , lowercase_=0.03 ):
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir / self.green) - 1
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.red - self.blue) / self.red
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.nir - self.green
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def UpperCamelCase__ ( self , lowercase_=0.16 ):
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCamelCase__ ( self , lowercase_=0.5 ):
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def UpperCamelCase__ ( self , lowercase_=None , lowercase_=None ):
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.nir / self.red
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
UpperCAmelCase_ : Optional[Any] = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.nir / self.red
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
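# --- Editor's note: illustrative usage, not part of the file above. ---
# Computing NDVI = (NIR - RED) / (NIR + RED) on small example bands:
red = np.array([[50.0, 60.0], [70.0, 80.0]])
nir = np.array([[200.0, 210.0], [220.0, 230.0]])
cl = IndexCalculation(red=red, nir=nir)
print(cl.calculation("NDVI"))  # same as cl.ndvi()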
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im


@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights to our Conditional DETR structure."""
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    # NOTE: the assignment targets below were lost in the obfuscated dump and are
    # reconstructed following the analogous DETR conversion script.
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCamelCase__ : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
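    # Illustrative invocation (script name and paths below are hypothetical):
    #   python convert_conditional_detr_checkpoint.py \
    #       --model_name conditional_detr_resnet50 \
    #       --pytorch_dump_folder_path ./conditional_detr_converted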
| 246 | 0 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
A_ :Tuple = Mapping[str, np.ndarray]
A_ :List[Any] = Mapping[str, Any] # Is a nested dict.
A_ :int = 0.01
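# ProteinNet stores coordinates in picometers; multiplying by 0.01 converts them to angstroms.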
@dataclasses.dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class __A :
"""simple docstring"""
UpperCamelCase__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
UpperCamelCase__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
UpperCamelCase__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
UpperCamelCase__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
UpperCamelCase__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
UpperCamelCase__ : Optional[np.ndarray] =None
# Optional remark about the protein. Included as a comment in output PDB
# files
UpperCamelCase__ : Optional[str] =None
# Templates used to generate this protein (prediction-only)
UpperCamelCase__ : Optional[Sequence[str]] =None
# Chain corresponding to each parent
UpperCamelCase__ : Optional[Sequence[int]] =None
def A ( a_ ) -> int:
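    # Parse a ProteinNet-style text record ([PRIMARY]/[TERTIARY]/[MASK] sections) into a Protein.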
__UpperCamelCase : List[str] =R"(\[[A-Z]+\]\n)"
    __UpperCamelCase : List[str] =[tag.strip() for tag in re.split(_a ,_a ) if len(tag ) > 0]
__UpperCamelCase : Iterator[Tuple[str, List[str]]] =zip(tags[0::2] ,[l.split('\n' ) for l in tags[1::2]] )
__UpperCamelCase : List[str] =["N", "CA", "C"]
__UpperCamelCase : Union[str, Any] =None
__UpperCamelCase : str =None
__UpperCamelCase : List[str] =None
for g in groups:
if "[PRIMARY]" == g[0]:
__UpperCamelCase : Any =g[1][0].strip()
            for i in range(len(seq ) ):
if seq[i] not in residue_constants.restypes:
__UpperCamelCase : Tuple ="X" # FIXME: strings are immutable
__UpperCamelCase : int =np.array(
                [residue_constants.restype_order.get(res_symbol ,residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
__UpperCamelCase : List[List[float]] =[]
for axis in range(3 ):
                tertiary.append(list(map(float ,g[1][axis].split() ) ) )
__UpperCamelCase : Union[str, Any] =np.array(_a )
__UpperCamelCase : Union[str, Any] =np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(_a ):
__UpperCamelCase : str =np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
__UpperCamelCase : Tuple =np.array(list(map({'-': 0, '+': 1}.get ,g[1][0].strip() ) ) )
__UpperCamelCase : int =np.zeros(
(
                    len(mask ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(_a ):
__UpperCamelCase : Dict =1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=_a ,atom_mask=_a ,aatype=_a ,residue_index=np.arange(len(_a ) ) ,b_factors=_a ,)
def A ( a_ ,a_ = 0 ) -> List[Any]:
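    # Collect PDB header lines (REMARK and PARENT records) for the requested chain.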
__UpperCamelCase : List[str] =[]
__UpperCamelCase : Union[str, Any] =prot.remark
if remark is not None:
pdb_headers.append(F'REMARK {remark}' )
__UpperCamelCase : Union[str, Any] =prot.parents
__UpperCamelCase : Union[str, Any] =prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
__UpperCamelCase : List[Any] =[p for i, p in zip(_a ,_a ) if i == chain_id]
    if parents is None or len(parents ) == 0:
        __UpperCamelCase : List[Any] =["N/A"]
    pdb_headers.append(F'PARENT {" ".join(parents )}' )
return pdb_headers
def A ( a_ ,a_ ) -> str:
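    # Re-insert REMARK/PARENT headers into an existing PDB string, switching parents at each TER record.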
__UpperCamelCase : List[str] =[]
__UpperCamelCase : Tuple =pdb_str.split('\n' )
__UpperCamelCase : Dict =prot.remark
if remark is not None:
out_pdb_lines.append(F'REMARK {remark}' )
__UpperCamelCase : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
__UpperCamelCase : Any =[]
if prot.parents_chain_index is not None:
__UpperCamelCase : Dict[str, List[str]] ={}
for p, i in zip(prot.parents ,prot.parents_chain_index ):
                parent_dict.setdefault(str(i ) ,[] )
                parent_dict[str(i )].append(p )
            __UpperCamelCase : Tuple =max([int(chain_idx ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
                __UpperCamelCase : str =parent_dict.get(str(i ) ,['N/A'] )
parents_per_chain.append(_a )
else:
parents_per_chain.append(list(prot.parents ) )
else:
__UpperCamelCase : Optional[Any] =[["N/A"]]
def make_parent_line(a_ ) -> str:
return F'PARENT {" ".join(_a )}'
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
__UpperCamelCase : List[str] =0
for i, l in enumerate(_a ):
if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
            if not chain_counter >= len(parents_per_chain ):
__UpperCamelCase : str =parents_per_chain[chain_counter]
else:
__UpperCamelCase : Union[str, Any] =["N/A"]
out_pdb_lines.append(make_parent_line(_a ) )
return "\n".join(_a )
def A ( a_ ) -> List[str]:
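    # Serialize a Protein into PDB format, emitting one ATOM record per present atom.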
__UpperCamelCase : str =residue_constants.restypes + ["X"]
def res_atoa(a_ ) -> str:
return residue_constants.restype_atoa.get(restypes[r] ,'UNK' )
__UpperCamelCase : Any =residue_constants.atom_types
__UpperCamelCase : List[str] =[]
__UpperCamelCase : Any =prot.atom_mask
__UpperCamelCase : Optional[int] =prot.aatype
__UpperCamelCase : Tuple =prot.atom_positions
__UpperCamelCase : Union[str, Any] =prot.residue_index.astype(np.intaa )
__UpperCamelCase : Dict =prot.b_factors
__UpperCamelCase : List[str] =prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('Invalid aatypes.' )
__UpperCamelCase : List[str] =get_pdb_headers(_a )
if len(_a ) > 0:
pdb_lines.extend(_a )
__UpperCamelCase : Optional[Any] =aatype.shape[0]
__UpperCamelCase : Union[str, Any] =1
__UpperCamelCase : Any =0
__UpperCamelCase : Any =string.ascii_uppercase
__UpperCamelCase : Any =None
# Add all atom sites.
for i in range(_a ):
__UpperCamelCase : Any =res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(_a ,atom_positions[i] ,atom_mask[i] ,b_factors[i] ):
if mask < 0.5:
continue
__UpperCamelCase : Tuple ="ATOM"
            __UpperCamelCase : List[str] =atom_name if len(atom_name ) == 4 else F' {atom_name}'
__UpperCamelCase : Union[str, Any] =""
__UpperCamelCase : List[Any] =""
__UpperCamelCase : Dict =1.00
__UpperCamelCase : Dict =atom_name[0] # Protein supports only C, N, O, S, this works.
__UpperCamelCase : List[str] =""
__UpperCamelCase : Tuple ="A"
if chain_index is not None:
__UpperCamelCase : Optional[Any] =chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
__UpperCamelCase : Tuple =(
F'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
F'{res_name_a:>3} {chain_tag:>1}'
F'{residue_index[i]:>4}{insertion_code:>1} '
F'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
F'{occupancy:>6.2f}{b_factor:>6.2f} '
F'{element:>2}{charge:>2}'
)
pdb_lines.append(_a )
atom_index += 1
__UpperCamelCase : Dict =i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
__UpperCamelCase : Union[str, Any] =True
__UpperCamelCase : Tuple =chain_index[i + 1]
if should_terminate:
# Close the chain.
__UpperCamelCase : str ="TER"
__UpperCamelCase : str =(
F'{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'
)
pdb_lines.append(_a )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(_a ,_a ) )
pdb_lines.append('END' )
pdb_lines.append('' )
return "\n".join(_a )
def A ( a_ ) -> Any:
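    # Ideal atom mask: marks every atom that the residue type could have, regardless of what was resolved.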
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def A ( a_ ,a_ ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,) -> int:
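    # Assemble a Protein from input features and model outputs (final atom positions and mask).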
return Protein(
aatype=features['aatype'] ,atom_positions=result['final_atom_positions'] ,atom_mask=result['final_atom_mask'] ,residue_index=features['residue_index'] + 1 ,b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) ,chain_index=_a ,remark=_a ,parents=_a ,parents_chain_index=_a ,)
| 364 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
A_ :Tuple = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def A ( a_ ,a_ ,a_=None ,a_=None ,a_=None ,a_=None ,a_=None ,a_=None ,) -> int:
if attention_mask is None:
__UpperCamelCase : Any =np.where(input_ids != config.pad_token_id ,1 ,0 )
if decoder_attention_mask is None:
__UpperCamelCase : Optional[Any] =np.where(decoder_input_ids != config.pad_token_id ,1 ,0 )
if head_mask is None:
__UpperCamelCase : str =np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCamelCase : Any =np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCamelCase : Optional[int] =np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class __A :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=99 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=32 , lowerCamelCase__=2 , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=0.02 , ):
"""simple docstring"""
__UpperCamelCase : Tuple =parent
__UpperCamelCase : str =batch_size
__UpperCamelCase : Optional[Any] =seq_length
__UpperCamelCase : List[Any] =is_training
__UpperCamelCase : int =use_labels
__UpperCamelCase : int =vocab_size
__UpperCamelCase : Any =hidden_size
__UpperCamelCase : List[str] =num_hidden_layers
__UpperCamelCase : Any =num_attention_heads
__UpperCamelCase : int =intermediate_size
__UpperCamelCase : List[Any] =hidden_act
__UpperCamelCase : Optional[Any] =hidden_dropout_prob
__UpperCamelCase : int =attention_probs_dropout_prob
__UpperCamelCase : Tuple =max_position_embeddings
__UpperCamelCase : List[Any] =eos_token_id
__UpperCamelCase : Tuple =pad_token_id
__UpperCamelCase : Any =bos_token_id
__UpperCamelCase : Tuple =initializer_range
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str =np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__UpperCamelCase : Any =np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__UpperCamelCase : Any =shift_tokens_right(lowerCamelCase__ , 1 , 2 )
__UpperCamelCase : List[str] =BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCamelCase__ , )
__UpperCamelCase : Dict =prepare_blenderbot_inputs_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return config, inputs_dict
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : Optional[Any] =self.prepare_config_and_inputs()
return config, inputs_dict
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =20
__UpperCamelCase : Optional[int] =model_class_name(lowerCamelCase__ )
__UpperCamelCase : Tuple =model.encode(inputs_dict['input_ids'] )
__UpperCamelCase , __UpperCamelCase : Optional[Any] =(
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__UpperCamelCase : Any =model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Tuple =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
__UpperCamelCase : List[str] =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase : Any =model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
__UpperCamelCase : List[str] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase : Tuple =model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase__ , )
__UpperCamelCase : Tuple =model.decode(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Tuple =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =20
__UpperCamelCase : int =model_class_name(lowerCamelCase__ )
__UpperCamelCase : Optional[int] =model.encode(inputs_dict['input_ids'] )
__UpperCamelCase , __UpperCamelCase : Any =(
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__UpperCamelCase : int =jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCamelCase : List[str] =model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Any =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase : Any =model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
__UpperCamelCase : Optional[Any] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase : Tuple =model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
__UpperCamelCase : Optional[int] =model.decode(lowerCamelCase__ , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ )
__UpperCamelCase : Tuple =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' )
@require_flax
class __A ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : Any =9_9
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Any =np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__UpperCamelCase : int =input_ids.shape[0]
__UpperCamelCase : List[str] =BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : int =self._get_config_and_data()
__UpperCamelCase : Optional[Any] =FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase__ )
__UpperCamelCase : int =lm_model(input_ids=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =(batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__UpperCamelCase : List[str] =FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase__ )
__UpperCamelCase : Any =np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
__UpperCamelCase : int =np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
__UpperCamelCase : Any =lm_model(input_ids=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =(*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
__UpperCamelCase : Tuple =shift_tokens_right(lowerCamelCase__ , 1 , 2 )
__UpperCamelCase : Optional[int] =np.equal(lowerCamelCase__ , 1 ).astype(np.floataa ).sum()
__UpperCamelCase : Optional[Any] =np.equal(lowerCamelCase__ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowerCamelCase__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class __A ( a , unittest.TestCase , a ):
"""simple docstring"""
UpperCamelCase__ : Any =True
UpperCamelCase__ : List[Any] =(
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
UpperCamelCase__ : Dict =(FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =FlaxBlenderbotSmallModelTester(self )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : Dict =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : Any =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase : Tuple =self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =model_class(lowerCamelCase__ )
@jax.jit
def encode_jitted(lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
return model.encode(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
with self.subTest('JIT Enabled' ):
__UpperCamelCase : Union[str, Any] =encode_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__UpperCamelCase : Any =encode_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase : Tuple =model_class(lowerCamelCase__ )
__UpperCamelCase : Tuple =model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
__UpperCamelCase : Any ={
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
return model.decode(
decoder_input_ids=lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , encoder_outputs=lowerCamelCase__ , )
with self.subTest('JIT Enabled' ):
__UpperCamelCase : Optional[Any] =decode_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__UpperCamelCase : int =decode_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
__UpperCamelCase : Optional[Any] =model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__UpperCamelCase : Optional[Any] =np.ones((1, 1) ) * model.config.eos_token_id
__UpperCamelCase : Union[str, Any] =model(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
| 245 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
__snake_case : Optional[int] = logging.get_logger(__name__)
__snake_case : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__snake_case : Tuple = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
__snake_case : int = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
__snake_case : Union[str, Any] = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
__snake_case : List[str] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_12,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_12,
}
__snake_case : int = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_12,
"""facebook/dpr-question_encoder-multiset-base""": 5_12,
}
__snake_case : Any = {
"""facebook/dpr-reader-single-nq-base""": 5_12,
"""facebook/dpr-reader-multiset-base""": 5_12,
}
__snake_case : Optional[int] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
__snake_case : Optional[Any] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
__snake_case : Union[str, Any] = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class A__(a_ ):
"""simple docstring"""
_A : List[str] = VOCAB_FILES_NAMES
_A : str = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_A : Tuple = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : List[str] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
_A : int = DPRContextEncoderTokenizer
class A__(a_ ):
"""simple docstring"""
_A : str = VOCAB_FILES_NAMES
_A : List[str] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_A : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Dict = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_A : List[Any] = DPRQuestionEncoderTokenizer
__snake_case : Optional[Any] = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
__snake_case : Tuple = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
__snake_case : str = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) into a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(a_ )
class A__:
"""simple docstring"""
def __call__( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , **_lowercase , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
a_ : List[str] = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
a_ : Union[str, Any] = titles if not isinstance(_lowercase , _lowercase ) else [titles]
a_ : List[str] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
a_ : Any = len(_lowercase )
a_ : Dict = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
assert len(_lowercase ) == len(
        _lowercase ), F'''There should be as many titles as texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.'''
a_ : Union[str, Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )["""input_ids"""]
a_ : Dict = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )["""input_ids"""]
a_ : Optional[Any] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
a_ : List[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
a_ : Union[str, Any] = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase = 16 , _lowercase = 64 , _lowercase = 4 , ) -> List[DPRSpanPrediction]:
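        # Rank passages by relevance logits, then pull the best answer spans out of each passage.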
a_ : Optional[Any] = reader_input["""input_ids"""]
a_ , a_ , a_ : Dict = reader_output[:3]
a_ : Optional[Any] = len(_lowercase )
a_ : int = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
a_ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
a_ : Union[str, Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
a_ : str = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
a_ : Any = sequence_ids.index(self.pad_token_id )
else:
a_ : Optional[int] = len(_lowercase )
a_ : Optional[int] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , ) -> List[DPRSpanPrediction]:
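        # Score every (start, end) pair up to max_answer_length and keep the top non-overlapping spans.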
a_ : Dict = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        a_ : Union[str, Any] = sorted(_lowercase , key=lambda x : x[1] , reverse=_lowercase )
a_ : Dict = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]'''
a_ : List[str] = end_index - start_index + 1
assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a_ )
class A__(a_, a_ ):
"""simple docstring"""
_A : List[str] = VOCAB_FILES_NAMES
_A : Any = READER_PRETRAINED_VOCAB_FILES_MAP
_A : Union[str, Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Tuple = READER_PRETRAINED_INIT_CONFIGURATION
_A : Optional[Any] = ['''input_ids''', '''attention_mask''']
_A : Union[str, Any] = DPRReaderTokenizer
| 248 |
def _UpperCAmelCase ( ):
'''simple docstring'''
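    # Project Euler 9: return a * b * c for the Pythagorean triplet with a + b + c = 1000.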
return [
a * b * (1_0_0_0 - a - b)
for a in range(1 , 9_9_9)
        for b in range(a , 9_9_9)
if (a * a + b * b == (1_0_0_0 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 248 | 1 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ,a_=13 ,a_=7 ,a_=True ,a_=True ,a_=True ,a_=True ,a_=True ,a_=False ,a_=False ,a_=False ,a_=2 ,a_=99 ,a_=0 ,a_=32 ,a_=5 ,a_=4 ,a_=0.1 ,a_=0.1 ,a_=512 ,a_=2 ,a_=0.02 ,a_=2 ,a_=4 ,a_="last" ,a_=True ,a_=None ,a_=0 ,) -> str:
_UpperCAmelCase : Optional[Any] = parent
_UpperCAmelCase : Union[str, Any] = batch_size
_UpperCAmelCase : List[Any] = seq_length
_UpperCAmelCase : Any = is_training
_UpperCAmelCase : List[Any] = use_input_lengths
_UpperCAmelCase : str = use_token_type_ids
_UpperCAmelCase : Tuple = use_labels
_UpperCAmelCase : Optional[int] = gelu_activation
_UpperCAmelCase : str = sinusoidal_embeddings
_UpperCAmelCase : Dict = causal
_UpperCAmelCase : Union[str, Any] = asm
_UpperCAmelCase : str = n_langs
_UpperCAmelCase : List[str] = vocab_size
_UpperCAmelCase : List[Any] = n_special
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : List[Any] = num_hidden_layers
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : Optional[int] = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Union[str, Any] = type_sequence_label_size
_UpperCAmelCase : Any = initializer_range
_UpperCAmelCase : int = num_labels
_UpperCAmelCase : Dict = num_choices
_UpperCAmelCase : Dict = summary_type
_UpperCAmelCase : Dict = use_proj
_UpperCAmelCase : str = scope
_UpperCAmelCase : str = bos_token_id
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : List[str] = None
if self.use_input_lengths:
_UpperCAmelCase : Optional[int] = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_UpperCAmelCase : List[Any] = None
if self.use_token_type_ids:
_UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
_UpperCAmelCase : str = None
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
_UpperCAmelCase : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_UpperCAmelCase : str = ids_tensor([self.batch_size] ,2 ).float()
_UpperCAmelCase : Any = ids_tensor([self.batch_size] ,self.num_choices )
_UpperCAmelCase : Any = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self ) -> str:
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,) -> Dict:
_UpperCAmelCase : int = XLMModel(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : List[str] = model(a_ ,lengths=a_ ,langs=a_ )
_UpperCAmelCase : int = model(a_ ,langs=a_ )
_UpperCAmelCase : Optional[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,) -> Optional[int]:
_UpperCAmelCase : Any = XLMWithLMHeadModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : List[str] = model(a_ ,token_type_ids=a_ ,labels=a_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,) -> Dict:
_UpperCAmelCase : str = XLMForQuestionAnsweringSimple(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : List[str] = model(a_ )
_UpperCAmelCase : List[str] = model(a_ ,start_positions=a_ ,end_positions=a_ )
_UpperCAmelCase : Any = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,) -> int:
_UpperCAmelCase : List[Any] = XLMForQuestionAnswering(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : Optional[Any] = model(a_ )
_UpperCAmelCase : Tuple = model(
a_ ,start_positions=a_ ,end_positions=a_ ,cls_index=a_ ,is_impossible=a_ ,p_mask=a_ ,)
_UpperCAmelCase : Optional[int] = model(
a_ ,start_positions=a_ ,end_positions=a_ ,cls_index=a_ ,is_impossible=a_ ,)
(_UpperCAmelCase) : Tuple = result_with_labels.to_tuple()
_UpperCAmelCase : Optional[Any] = model(a_ ,start_positions=a_ ,end_positions=a_ )
(_UpperCAmelCase) : Tuple = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,) -> List[Any]:
_UpperCAmelCase : Optional[Any] = XLMForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : List[str] = model(a_ )
_UpperCAmelCase : Dict = model(a_ ,labels=a_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,) -> List[Any]:
_UpperCAmelCase : Union[str, Any] = self.num_labels
_UpperCAmelCase : Dict = XLMForTokenClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : Dict = model(a_ ,attention_mask=a_ ,labels=a_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,) -> str:
_UpperCAmelCase : str = self.num_choices
_UpperCAmelCase : Dict = XLMForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : Any = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_UpperCAmelCase : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_UpperCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_UpperCAmelCase : Dict = model(
a_ ,attention_mask=a_ ,token_type_ids=a_ ,labels=a_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _snake_case ( self ) -> int:
_UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
(
_UpperCAmelCase
) : List[str] = config_and_inputs
_UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
return config, inputs_dict
@require_torch
class lowercase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _snake_case ( self ,a_ ,a_ ,a_=False ) -> int:
_UpperCAmelCase : Any = super()._prepare_for_class(a_ ,a_ ,return_labels=a_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_UpperCAmelCase : Optional[int] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=a_ )
_UpperCAmelCase : str = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=a_ )
return inputs_dict
def _snake_case ( self ) -> str:
_UpperCAmelCase : Optional[Any] = XLMModelTester(self )
_UpperCAmelCase : str = ConfigTester(self ,config_class=a_ ,emb_dim=37 )
def _snake_case ( self ) -> Any:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*a_ )
def _snake_case ( self ) -> str:
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*a_ )
def _snake_case ( self ) -> Tuple:
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*a_ )
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*a_ )
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*a_ )
def _snake_case ( self ) -> str:
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*a_ )
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*a_ )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_=False ,a_=1 ) -> Optional[int]:
self.assertIsInstance(a_ ,a_ )
self.assertListEqual(
            [isinstance(iter_attentions ,tuple ) for iter_attentions in attentions] ,[True] * len(a_ ) )
self.assertEqual(len(a_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(a_ ):
# adds PAD dummy token
_UpperCAmelCase : Dict = min_length + idx + 1
_UpperCAmelCase : List[str] = min_length + idx + 1
_UpperCAmelCase : Optional[Any] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(a_ ) )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_=False ,a_=1 ) -> List[str]:
self.assertIsInstance(a_ ,a_ )
self.assertListEqual(
            [isinstance(iter_hidden_states ,tuple ) for iter_hidden_states in hidden_states] ,[True] * len(a_ ) ,)
self.assertEqual(len(a_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(a_ ):
# adds PAD dummy token
_UpperCAmelCase : Tuple = min_length + idx + 1
_UpperCAmelCase : List[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(a_ ) ,)
pass
@slow
def _snake_case ( self ) -> int:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[Any] = XLMModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ) -> str:
_UpperCAmelCase : List[Any] = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(a_ )
_UpperCAmelCase : Union[str, Any] = torch.tensor([[14, 447]] ,dtype=torch.long ,device=a_ ) # the president
_UpperCAmelCase : Optional[int] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
_UpperCAmelCase : str = model.generate(a_ ,do_sample=a_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,a_ )
| 356 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : str = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[int] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
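# For static type checkers, import everything eagerly; at runtime the module resolves imports lazily.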
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
A_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 349 | 0 |
from __future__ import annotations
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = []
create_all_state(1 ,UpperCamelCase_ ,UpperCamelCase_ ,[] ,UpperCamelCase_ )
return result
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,):
"""simple docstring"""
if level == 0:
total_list.append(current_list[:] )
return
for i in range(UpperCamelCase_ ,total_number - level + 2 ):
current_list.append(UpperCamelCase_ )
create_all_state(i + 1 ,UpperCamelCase_ ,level - 1 ,UpperCamelCase_ ,UpperCamelCase_ )
current_list.pop()
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
for i in total_list:
        print(*i )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : str = 4
_SCREAMING_SNAKE_CASE : Any = 2
_SCREAMING_SNAKE_CASE : List[str] = generate_all_combinations(n, k)
print_all_state(total_list)
| 127 |
from __future__ import annotations
_SCREAMING_SNAKE_CASE : Optional[int] = []
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
for i in range(len(UpperCamelCase_ ) ):
if board[row][i] == 1:
return False
for i in range(len(UpperCamelCase_ ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(UpperCamelCase_ ,-1 ,-1 ) ,range(UpperCamelCase_ ,-1 ,-1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(UpperCamelCase_ ,-1 ,-1 ) ,range(UpperCamelCase_ ,len(UpperCamelCase_ ) ) ):
if board[i][j] == 1:
return False
return True
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
if row >= len(UpperCamelCase_ ):
solution.append(UpperCamelCase_ )
printboard(UpperCamelCase_ )
print()
return True
for i in range(len(UpperCamelCase_ ) ):
if is_safe(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ):
snake_case = 1
solve(UpperCamelCase_ ,row + 1 )
snake_case = 0
return False
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
for i in range(len(UpperCamelCase_ ) ):
for j in range(len(UpperCamelCase_ ) ):
if board[i][j] == 1:
print('''Q''' ,end=''' ''' )
else:
print('''.''' ,end=''' ''' )
print()
# n=int(input("The no. of queens"))
_SCREAMING_SNAKE_CASE : Tuple = 8
_SCREAMING_SNAKE_CASE : List[Any] = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 127 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    r"""A "fast" Funnel Transformer tokenizer backed by the tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
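# Usage sketch (added; "funnel-transformer/small" is the real Hub checkpoint id,
# the outputs below are illustrative, not asserted):
#
#   tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
#   enc = tok("Hello world", "How are you?")
#   enc["token_type_ids"]  # starts with 2 (cls_token_type_id) for <cls>,
#                          # then 0s for segment A and 1s for segment B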
| 13 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
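# Standalone sketch (added, outside the original test class), assuming the same
# jplu/tf-camembert-base weights used above:
#
#   model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
#   out = model(tf.constant([[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]]))
#   print(out["last_hidden_state"].shape)  # (1, 10, 768)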
| 13 | 1 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes 2, 3, 5, 7, ... in order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F"""{solution() = }""")
| 73 | import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")

        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 87 | 0 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for every perimeter up to max_perimeter, how many integer
    right triangles have that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1_000) -> int:
    """Return the perimeter <= n with the most right-triangle solutions."""
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
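# Worked check (an added sketch using only the function above): for
# max_perimeter = 1000 the counter peaks at perimeter 840, which admits eight
# integer right triangles, e.g. (40, 399, 401) and (240, 252, 348).
def _check_peak() -> None:
    counts = pythagorean_triple(1_000)
    assert counts.most_common(1)[0] == (840, 8)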
| 365 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Scheduler with Heun (second-order) steps for discrete beta schedules,
    following Algorithm 1 of Karras et al. (2022)."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
def __init__( self : str ,_a : int = 1000 ,_a : float = 0.0_0085 ,_a : float = 0.012 ,_a : str = "linear" ,_a : Optional[Union[np.ndarray, List[float]]] = None ,_a : str = "epsilon" ,_a : Optional[bool] = False ,_a : Optional[bool] = False ,_a : float = 1.0 ,_a : str = "linspace" ,_a : int = 0 ,):
'''simple docstring'''
if trained_betas is not None:
_a : List[str] = torch.tensor(_a ,dtype=torch.floataa )
elif beta_schedule == "linear":
_a : Tuple = torch.linspace(_a ,_a ,_a ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a : List[str] = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,_a ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a : Dict = betas_for_alpha_bar(_a ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
_a : Tuple = betas_for_alpha_bar(_a ,alpha_transform_type='exp' )
else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
_a : Optional[Any] = 1.0 - self.betas
_a : Optional[int] = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(_a ,_a ,_a )
_a : Optional[int] = use_karras_sigmas
def __lowercase ( self : Any ,_a : Union[str, Any] ,_a : Optional[Any]=None ):
'''simple docstring'''
if schedule_timesteps is None:
_a : List[Any] = self.timesteps
_a : Dict = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a : int = 1 if len(_a ) > 1 else 0
else:
_a : str = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
_a : str = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Union[float, torch.FloatTensor] ,):
'''simple docstring'''
_a : List[Any] = self.index_for_timestep(_a )
_a : Tuple = self.sigmas[step_index]
_a : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowercase ( self : Any ,_a : int ,_a : Union[str, torch.device] = None ,_a : Optional[int] = None ,):
'''simple docstring'''
_a : Optional[Any] = num_inference_steps
_a : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a : Optional[Any] = np.linspace(0 ,num_train_timesteps - 1 ,_a ,dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : int = (np.arange(0 ,_a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a : Any = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : Union[str, Any] = (np.arange(_a ,0 ,-step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_a : Tuple = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a : Union[str, Any] = np.log(_a )
_a : str = np.interp(_a ,np.arange(0 ,len(_a ) ) ,_a )
if self.config.use_karras_sigmas:
_a : List[Any] = self._convert_to_karras(in_sigmas=_a ,num_inference_steps=self.num_inference_steps )
_a : Dict = np.array([self._sigma_to_t(_a ,_a ) for sigma in sigmas] )
_a : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a : Union[str, Any] = torch.from_numpy(_a ).to(device=_a )
_a : Any = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_a : List[Any] = torch.from_numpy(_a )
_a : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_a ).startswith('mps' ):
# mps does not support float64
_a : Tuple = timesteps.to(_a ,dtype=torch.floataa )
else:
_a : Dict = timesteps.to(device=_a )
# empty dt and derivative
_a : Tuple = None
_a : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a : Union[str, Any] = defaultdict(_a )
def __lowercase ( self : str ,_a : Dict ,_a : Dict ):
'''simple docstring'''
_a : Optional[int] = np.log(_a )
# get distribution
_a : Union[str, Any] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_a : List[Any] = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_a : Tuple = low_idx + 1
_a : Union[str, Any] = log_sigmas[low_idx]
_a : Optional[Any] = log_sigmas[high_idx]
# interpolate sigmas
_a : Optional[Any] = (low - log_sigma) / (low - high)
_a : List[str] = np.clip(_a ,0 ,1 )
# transform interpolation to time range
_a : Union[str, Any] = (1 - w) * low_idx + w * high_idx
_a : List[str] = t.reshape(sigma.shape )
return t
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Tuple ):
'''simple docstring'''
_a : float = in_sigmas[-1].item()
_a : float = in_sigmas[0].item()
_a : Tuple = 7.0 # 7.0 is the value used in the paper
_a : str = np.linspace(0 ,1 ,_a )
_a : Optional[Any] = sigma_min ** (1 / rho)
_a : Union[str, Any] = sigma_max ** (1 / rho)
_a : str = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return self.dt is None
def __lowercase ( self : int ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : Union[float, torch.FloatTensor] ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : bool = True ,):
'''simple docstring'''
_a : Union[str, Any] = self.index_for_timestep(_a )
# advance index counter by 1
_a : Any = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a : Tuple = self.sigmas[step_index]
_a : int = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_a : List[str] = self.sigmas[step_index - 1]
_a : List[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_a : Optional[int] = 0
_a : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a : Dict = sigma_hat if self.state_in_first_order else sigma_next
_a : Optional[int] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a : List[Any] = sigma_hat if self.state_in_first_order else sigma_next
_a : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_a : Union[str, Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
_a : Optional[int] = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a : Optional[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a : Any = sigma_next - sigma_hat
# store for 2nd order step
_a : int = derivative
_a : List[str] = dt
_a : Union[str, Any] = sample
else:
# 2. 2nd order / Heun's method
_a : Dict = (sample - pred_original_sample) / sigma_next
_a : Tuple = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_a : Optional[Any] = self.dt
_a : Union[str, Any] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_a : List[Any] = None
_a : Union[str, Any] = None
_a : Dict = None
_a : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __lowercase ( self : Optional[int] ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,):
'''simple docstring'''
_a : str = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
_a : Dict = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
_a : Optional[Any] = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
_a : int = self.timesteps.to(original_samples.device )
_a : Optional[Any] = timesteps.to(original_samples.device )
_a : Any = [self.index_for_timestep(_a ,_a ) for t in timesteps]
_a : Optional[int] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a : Optional[Any] = sigma.unsqueeze(-1 )
_a : Any = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.config.num_train_timesteps
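# Standalone sketch (added): the Karras et al. (2022) sigma schedule that
# `_convert_to_karras` above implements, with rho = 7.0 as in the paper. It
# relies on the `import numpy as np` already at the top of this file; the
# sigma_min / sigma_max values are illustrative, the scheduler derives its own
# from alphas_cumprod.
def karras_sigmas(sigma_min: float, sigma_max: float, num_steps: int, rho: float = 7.0) -> np.ndarray:
    ramp = np.linspace(0, 1, num_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    # interpolate linearly in sigma**(1/rho) space, then undo the power
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho


# e.g. karras_sigmas(0.03, 14.6, 10) decreases smoothly from 14.6 down to 0.03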
| 5 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_A = 16
_A = 32
def lowerCamelCase__ ( __lowerCAmelCase : Accelerator , __lowerCAmelCase : int = 16 ):
"""simple docstring"""
lowerCAmelCase_ = AutoTokenizer.from_pretrained("bert-base-cased" )
lowerCAmelCase_ = load_dataset("glue" , "mrpc" )
def tokenize_function(__lowerCAmelCase : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase_ = datasets.map(
_lowercase , batched=_lowercase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__lowerCAmelCase : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase_ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase_ = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase_ = 8
else:
lowerCAmelCase_ = None
return tokenizer.pad(
_lowercase , padding="longest" , max_length=_lowercase , pad_to_multiple_of=_lowercase , return_tensors="pt" , )
# Instantiate dataloaders.
lowerCAmelCase_ = DataLoader(
tokenized_datasets["train"] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
lowerCAmelCase_ = DataLoader(
tokenized_datasets["validation"] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_A = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowercase ) == "1":
lowerCAmelCase_ = 2
# New Code #
lowerCAmelCase_ = int(args.gradient_accumulation_steps )
# Initialize accelerator
lowerCAmelCase_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_lowercase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ = config["""lr"""]
lowerCAmelCase_ = int(config["num_epochs"] )
lowerCAmelCase_ = int(config["seed"] )
lowerCAmelCase_ = int(config["batch_size"] )
lowerCAmelCase_ = evaluate.load("glue" , "mrpc" )
set_seed(_lowercase )
lowerCAmelCase_ = get_dataloaders(_lowercase , _lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase_ = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase_ = AdamW(params=model.parameters() , lr=_lowercase )
# Instantiate scheduler
lowerCAmelCase_ = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=100 , num_training_steps=(len(_lowercase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Now we train the model
for epoch in range(_lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_lowercase ):
lowerCAmelCase_ = model(**_lowercase )
lowerCAmelCase_ = output.loss
accelerator.backward(_lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ = model(**_lowercase )
lowerCAmelCase_ = outputs.logits.argmax(dim=-1 )
lowerCAmelCase_ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
lowerCAmelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , _lowercase )
def lowerCamelCase__ ( ):
"""simple docstring"""
lowerCAmelCase_ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_lowercase , default=_lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=_lowercase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
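# Conceptual sketch (added): what `accelerator.accumulate(model)` replaces if
# gradient accumulation is written by hand in plain PyTorch. The names below
# (model, optimizer, dataloader, k) are placeholders, not objects from this file:
#
#   for step, batch in enumerate(dataloader):
#       loss = model(**batch).loss / k   # scale so k micro-batches sum to one step
#       loss.backward()
#       if (step + 1) % k == 0:
#           optimizer.step()
#           optimizer.zero_grad()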
| 231 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that cannot be written as a prime
    plus twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the smallest counterexample to Goldbach's other conjecture."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f'{solution() = }')
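# Verification note (added): this is Project Euler problem 46, Goldbach's other
# conjecture. The first counterexample, and hence solution(), is the odd
# composite 5777, which equals no p + 2*i*i for any prime p and integer i.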
| 170 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'camembert-base': 512,
}
SPIECE_UNDERLINE = '▁'
class CamembertTokenizer(PreTrainedTokenizer):
'''simple docstring'''
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ["input_ids", "attention_mask"]
def __init__(self , UpperCAmelCase , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , UpperCAmelCase = None , **UpperCAmelCase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_snake_case = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
_snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
_snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase ) )
_snake_case = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
_snake_case = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
_snake_case = len(self.fairseq_tokens_to_ids )
_snake_case = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
_snake_case = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowercase (self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_snake_case = [self.cls_token_id]
_snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def lowercase (self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase (self ) -> Tuple:
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def lowercase (self ) -> Tuple:
_snake_case = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase (self , UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase )
def lowercase (self , UpperCAmelCase ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(UpperCAmelCase ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(UpperCAmelCase )
def lowercase (self , UpperCAmelCase ) -> Optional[int]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase (self , UpperCAmelCase ) -> Tuple:
_snake_case = []
_snake_case = """"""
_snake_case = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase ) + token
_snake_case = True
_snake_case = []
else:
current_sub_tokens.append(UpperCAmelCase )
_snake_case = False
out_string += self.sp_model.decode(UpperCAmelCase )
return out_string.strip()
def __getstate__(self ) -> Any:
_snake_case = self.__dict__.copy()
_snake_case = None
return state
def __setstate__(self , UpperCAmelCase ) -> List[Any]:
_snake_case = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_snake_case = {}
_snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase (self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_snake_case = os.path.join(
UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase , """wb""" ) as fi:
_snake_case = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase )
return (out_vocab_file,) | 270 |
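# Usage sketch for the CamembertTokenizer row above (added; "camembert-base" is
# the real Hub checkpoint id, the printed pieces are illustrative):
#
#   tok = CamembertTokenizer.from_pretrained("camembert-base")
#   tok.tokenize("J'aime le camembert !")  # sentencepiece pieces
#   tok.encode("J'aime le camembert !")    # ids shifted by the fairseq offset of 4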
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = StableDiffusionPanoramaPipeline
lowerCAmelCase_ = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowercase (self ) -> List[Any]:
torch.manual_seed(0 )
_snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
_snake_case = DDIMScheduler()
torch.manual_seed(0 )
_snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_snake_case = CLIPTextModel(UpperCAmelCase )
_snake_case = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_snake_case = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowercase (self , UpperCAmelCase , UpperCAmelCase=0 ) -> Tuple:
_snake_case = torch.manual_seed(UpperCAmelCase )
_snake_case = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowercase (self ) -> Tuple:
_snake_case = """cpu""" # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = StableDiffusionPanoramaPipeline(**UpperCAmelCase )
_snake_case = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
_snake_case = self.get_dummy_inputs(UpperCAmelCase )
_snake_case = sd_pipe(**UpperCAmelCase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase (self ) -> Tuple:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase (self ) -> Any:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def lowercase (self ) -> Any:
_snake_case = """cpu""" # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = StableDiffusionPanoramaPipeline(**UpperCAmelCase )
_snake_case = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
_snake_case = self.get_dummy_inputs(UpperCAmelCase )
_snake_case = """french fries"""
_snake_case = sd_pipe(**UpperCAmelCase , negative_prompt=UpperCAmelCase )
_snake_case = output.images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase (self ) -> str:
_snake_case = """cpu""" # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = StableDiffusionPanoramaPipeline(**UpperCAmelCase )
_snake_case = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
_snake_case = self.get_dummy_inputs(UpperCAmelCase )
_snake_case = sd_pipe(**UpperCAmelCase , view_batch_size=2 )
_snake_case = output.images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase (self ) -> Tuple:
_snake_case = """cpu""" # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
_snake_case = StableDiffusionPanoramaPipeline(**UpperCAmelCase )
_snake_case = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
_snake_case = self.get_dummy_inputs(UpperCAmelCase )
_snake_case = sd_pipe(**UpperCAmelCase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase (self ) -> str:
_snake_case = """cpu""" # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = PNDMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=UpperCAmelCase )
_snake_case = StableDiffusionPanoramaPipeline(**UpperCAmelCase )
_snake_case = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
_snake_case = self.get_dummy_inputs(UpperCAmelCase )
_snake_case = sd_pipe(**UpperCAmelCase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase (self ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase (self , UpperCAmelCase=0 ) -> List[str]:
_snake_case = torch.manual_seed(UpperCAmelCase )
_snake_case = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowercase (self ) -> List[Any]:
_snake_case = """stabilityai/stable-diffusion-2-base"""
_snake_case = DDIMScheduler.from_pretrained(UpperCAmelCase , subfolder="""scheduler""" )
_snake_case = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
_snake_case = self.get_inputs()
_snake_case = pipe(**UpperCAmelCase ).images
_snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_snake_case = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def lowercase (self ) -> str:
_snake_case = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=UpperCAmelCase )
_snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
_snake_case = self.get_inputs()
_snake_case = pipe(**UpperCAmelCase ).images
_snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_snake_case = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase (self ) -> Optional[int]:
_snake_case = 0
def callback_fn(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> None:
_snake_case = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_snake_case = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_snake_case = latents[0, -3:, -3:, -1]
_snake_case = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
_snake_case = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_snake_case = latents[0, -3:, -3:, -1]
_snake_case = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
_snake_case = False
_snake_case = """stabilityai/stable-diffusion-2-base"""
_snake_case = DDIMScheduler.from_pretrained(UpperCAmelCase , subfolder="""scheduler""" )
_snake_case = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase )
_snake_case = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
_snake_case = self.get_inputs()
pipe(**UpperCAmelCase , callback=UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase (self ) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_snake_case = """stabilityai/stable-diffusion-2-base"""
_snake_case = DDIMScheduler.from_pretrained(UpperCAmelCase , subfolder="""scheduler""" )
_snake_case = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase )
_snake_case = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_snake_case = self.get_inputs()
_snake_case = pipe(**UpperCAmelCase )
_snake_case = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9 | 270 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/pegasus-xsum': 5_1_2,
}
logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    r"""Construct a PEGASUS tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , _snake_case , _snake_case="<pad>" , _snake_case="</s>" , _snake_case="<unk>" , _snake_case="<mask_2>" , _snake_case="<mask_1>" , _snake_case=None , _snake_case=103 , _snake_case = None , **_snake_case , ) -> None:
'''simple docstring'''
__a = offset
if additional_special_tokens is not None:
if not isinstance(_snake_case , _snake_case ):
raise TypeError(
F"""additional_special_tokens should be of type {type(_snake_case )}, but is"""
F""" {type(_snake_case )}""" )
__a = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(_snake_case ) , self.offset - 1 )
]
if len(set(_snake_case ) ) != len(_snake_case ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
__a = additional_special_tokens_extended
else:
__a = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
__a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_snake_case , unk_token=_snake_case , mask_token=_snake_case , pad_token=_snake_case , mask_token_sent=_snake_case , offset=_snake_case , additional_special_tokens=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
__a = mask_token_sent
__a = vocab_file
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
# add special tokens to encoder dict
__a = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
__a = {v: k for k, v in self.encoder.items()}
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return len(self.sp_model ) + self.offset
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict[str, int]:
'''simple docstring'''
__a = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
'''simple docstring'''
__a = self.__dict__.copy()
__a = None
return state
def __setstate__( self , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__a = {}
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_snake_case , out_type=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
'''simple docstring'''
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
__a = self.sp_model.piece_to_id(_snake_case )
return sp_id + self.offset
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> str:
'''simple docstring'''
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
__a = self.sp_model.IdToPiece(index - self.offset )
return token
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Any:
'''simple docstring'''
__a = []
__a = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_snake_case ) + token
__a = []
else:
current_sub_tokens.append(_snake_case )
out_string += self.sp_model.decode(_snake_case )
return out_string.strip()
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=False ) -> Any:
'''simple docstring'''
return 1
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None , _snake_case = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(_snake_case )
elif token_ids_a is None:
return self._special_token_mask(_snake_case ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_snake_case ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case , '''wb''' ) as fi:
__a = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,) | 6 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for every perimeter up to ``max_perimeter``, how many
    right-angled triangles with integer sides have that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    """Return the perimeter p <= n with the maximum number of solutions (Project Euler 39)."""
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]
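# Quick sanity check (these counts are standard Project Euler 39 facts): a perimeter
# of 120 admits exactly three triangles, (20, 48, 52), (24, 45, 51) and (30, 40, 50),
# and solution(1000) evaluates to 840.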
if __name__ == "__main__":
print(F"Perimeter {solution()} has maximum solutions") | 6 | 1 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Element-wise SiLU (swish) activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
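# Minimal usage sketch (illustrative values, not part of the original file):
#
#     >>> sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0]))
#     array([-0.26894142,  0.        ,  0.73105858])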
if __name__ == "__main__":
import doctest
doctest.testmod()
| 285 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


# NOTE: the class name below is reconstructed; the parameter set matches the M-CTC-T
# speech feature extractor that lives under the deprecated models path.
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform):
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
| 285 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 296 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float tensor as nested Python lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
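# Shape sketch (hypothetical output values): floats_list((2, 3)) returns a 2x3 nested
# Python list of floats drawn from [0, 1), e.g. [[0.41, 0.07, 0.88], [0.12, 0.95, 0.33]].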
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        self.assertEquals(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 296 | 1 |
def solution(n: int = 1000) -> int:
    """Return the sum of all natural numbers below ``n`` that are multiples of 3 or 5."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
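# Worked example: below 10 the multiples of 3 or 5 are 3, 5, 6 and 9, so solution(10) == 23.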
if __name__ == "__main__":
print(F'''{solution() = }''')
| 370 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of ``func`` -- an expression in the variable ``x`` given as a
    string -- starting the Newton-Raphson iteration from ``a``."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find root of the natural logarithm: log(x) - 1 = 0 has its root at x = e
print(F'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 189 | 0 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 312 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        } | 312 | 1 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )
        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)
        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)
        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()
        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()
        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # XLA support for TransfoXL is still pending upstream
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass


@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.int32 ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=2_0_0, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids) | 17 | '''simple docstring'''
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
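# Worked example: 5 is 0b101 and 3 is 0b011; only the last bit is set in both,
# so binary_and(5, 3) == "0b001".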
if __name__ == "__main__":
import doctest
doctest.testmod() | 17 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """The denoised sample tensor output by the model."""

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size=65536,
        sample_rate=None,
        in_channels=2,
        out_channels=2,
        extra_in_channels=0,
        time_embedding_type="fourier",
        flip_sin_to_cos=True,
        use_timestep_embedding=False,
        freq_shift=0.0,
        down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type="UNetMidBlock1D",
        out_block_type=None,
        block_out_channels=(32, 32, 64),
        act_fn=None,
        norm_num_groups=8,
        layers_per_block=1,
        downsample_each_block=False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(up_block_types) - 1
            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(self, sample, timestep, return_dict=True):
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up: each up block consumes the residuals of the matching down block in reverse order
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)
        return UNet1DOutput(sample=sample)
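# Minimal smoke test (a sketch under the assumed defaults above; shapes are what the
# default three-block down/up configuration should produce, not verified output):
#
#     model = UNet1DModel(sample_size=256, in_channels=2, out_channels=2)
#     out = model(torch.randn(1, 2, 256), timestep=10).sample  # expected shape (1, 2, 256)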
| 140 | import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
"""bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 140 | 1 |
"""simple docstring"""
import os
def lowercase ( A_ = "input.txt" )-> str:
'''simple docstring'''
with open(os.path.join(os.path.dirname(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) as input_file:
a : Any = [
[int(lowerCAmelCase__ ) for element in line.split("," )]
for line in input_file.readlines()
]
a : List[str] = len(lowerCAmelCase__ )
a : Optional[Any] = len(matrix[0] )
a : Dict = [[-1 for _ in range(lowerCAmelCase__ )] for _ in range(lowerCAmelCase__ )]
for i in range(lowerCAmelCase__ ):
a : List[Any] = matrix[i][0]
for j in range(1 , lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
a : Any = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowerCAmelCase__ ):
a : Tuple = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
a : Dict = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
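# Worked example (illustrative matrix, not from the input file): for [[1, 9], [3, 1]]
# the cheapest traversal enters at 3 in the first column and moves right onto 1,
# so the same DP would return 4.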
if __name__ == "__main__":
print(f'''{solution() = }''')
| 352 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    """Build the HF UperNet + ConvNeXt config matching the given checkpoint name."""
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1_024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1_536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1_024, 2_048]
        auxiliary_in_channels = 1_024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    """Build (old_name, new_name) pairs mapping mmseg parameter names to HF names."""
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """Pop ``old`` from ``dct`` and re-insert its value under ``new``."""
    val = dct.pop(old)
    dct[new] = val
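# For example, rename_key(d, "decode_head.conv_seg.weight", "decode_head.classifier.weight")
# moves that tensor under the new Hugging Face parameter name.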
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Download an mmsegmentation UperNet+ConvNeXt checkpoint, convert it, and verify it."""
    model_name_to_url = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
a : List[str] = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
elif model_name == "upernet-convnext-small":
a : int = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
elif model_name == "upernet-convnext-base":
a : Union[str, Any] = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
elif model_name == "upernet-convnext-large":
a : Union[str, Any] = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
elif model_name == "upernet-convnext-xlarge":
a : str = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
    # `a` above holds the expected logits slice for the selected checkpoint
    assert torch.allclose(outputs.logits[0, 0, :3, :3], a, atol=1e-4)
print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 226 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
| 295 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 0 |
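# A minimal sketch of the deferred-import idea behind _LazyModule, written with
# a PEP 562 module-level __getattr__. It is meant to live in a package
# __init__.py; the structure mirrors _import_structure above, and the hook is
# an illustrative simplification of the real class, not its implementation.
import importlib

_lazy_structure = {"config": ["OnnxConfig"], "convert": ["export"]}
# Invert to: attribute name -> submodule that defines it.
_attr_to_module = {attr: mod for mod, attrs in _lazy_structure.items() for attr in attrs}

def __getattr__(name):
    # Import the defining submodule only on first attribute access.
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    return getattr(importlib.import_module(f".{module_name}", __name__), name)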
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Tuple = 0
lowercase_ : Dict = [0]
lowercase_ : List[str] = [0]
lowercase_ : List[str] = len(__UpperCamelCase )
self.assertEqual(k.knapsack(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ,0 )
lowercase_ : Union[str, Any] = [60]
lowercase_ : Dict = [10]
lowercase_ : List[str] = len(__UpperCamelCase )
self.assertEqual(k.knapsack(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ,0 )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : int = 3
lowercase_ : List[str] = [1, 2, 3]
lowercase_ : Union[str, Any] = [3, 2, 1]
lowercase_ : Optional[int] = len(__UpperCamelCase )
self.assertEqual(k.knapsack(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ,5 )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Tuple = 50
lowercase_ : int = [60, 100, 120]
lowercase_ : Tuple = [10, 20, 30]
lowercase_ : Union[str, Any] = len(__UpperCamelCase )
self.assertEqual(k.knapsack(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ,220 )
if __name__ == "__main__":
unittest.main()
| 321 | """simple docstring"""
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : List[Any] = name
lowercase_ : int = val
def __str__( self ) -> Tuple:
'''simple docstring'''
return f'''{self.__class__.__name__}({self.name}, {self.val})'''
def __lt__( self ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
return self.val < other.val
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Optional[int] = {}
lowercase_ : Tuple = {}
lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase )
def __getitem__( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
return self.get_value(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
return (idx - 1) // 2
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
return idx * 2 + 1
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
return idx * 2 + 2
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
return self.heap_dict[key]
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1
lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase )
for idx, i in enumerate(__UpperCamelCase ):
lowercase_ : Any = idx
lowercase_ : str = i.val
for i in range(__UpperCamelCase ,-1 ,-1 ):
self.sift_down(__UpperCamelCase ,__UpperCamelCase )
return array
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
while True:
lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741
lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase )
lowercase_ : List[str] = idx
if l < len(__UpperCamelCase ) and array[l] < array[idx]:
lowercase_ : List[str] = l
if r < len(__UpperCamelCase ) and array[r] < array[smallest]:
lowercase_ : Dict = r
if smallest != idx:
lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx]
lowercase_ , lowercase_ : str = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
lowercase_ : Any = smallest
else:
break
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p]
lowercase_ , lowercase_ : Tuple = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
lowercase_ : int = p
lowercase_ : str = self.get_parent_idx(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
return self.heap[0]
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0]
lowercase_ , lowercase_ : Tuple = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
lowercase_ : Tuple = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 ,self.heap )
return x
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
self.heap.append(__UpperCamelCase )
lowercase_ : Tuple = len(self.heap ) - 1
lowercase_ : Optional[int] = node.val
self.sift_up(len(self.heap ) - 1 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return len(self.heap ) == 0
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
lowercase_ : Any = new_value
lowercase_ : List[str] = new_value
self.sift_up(self.idx_of_element[node] )
__SCREAMING_SNAKE_CASE =Node("R", -1)
__SCREAMING_SNAKE_CASE =Node("B", 6)
__SCREAMING_SNAKE_CASE =Node("A", 3)
__SCREAMING_SNAKE_CASE =Node("X", 1)
__SCREAMING_SNAKE_CASE =Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321 | 1 |
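# A reference 0/1 knapsack (recursion plus memoization) matching the results
# the tests above expect. The signature knapsack(capacity, weights, values,
# counter) is an assumption; the obfuscated test bodies do not preserve the
# argument order.
from functools import lru_cache

def knapsack(capacity: int, weights: list, values: list, counter: int) -> int:
    @lru_cache(maxsize=None)
    def best(cap: int, i: int) -> int:
        if i == 0 or cap == 0:
            return 0
        if weights[i - 1] > cap:  # item i-1 does not fit
            return best(cap, i - 1)
        # Either skip item i-1 or take it and recurse on the reduced capacity.
        return max(best(cap, i - 1), values[i - 1] + best(cap - weights[i - 1], i - 1))

    return best(capacity, counter)

assert knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220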
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report twice: once in github format with all the information that needs to be shared
# with others, and a second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 times using --repeat-times 3, and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline (100%) and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
_A = float("""nan""")
class _lowerCamelCase :
def __init__( self : str , UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = sys.stdout
lowerCAmelCase__ : int = open(UpperCamelCase , """a""" )
def __getattr__( self : Any , UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return getattr(self.stdout , UpperCamelCase )
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
self.stdout.write(UpperCamelCase )
# strip tqdm codes
self.file.write(re.sub(r"""^.*\r""" , """""" , UpperCamelCase , 0 , re.M ) )
def lowercase_ ( __UpperCAmelCase=80 , __UpperCAmelCase=False ) -> Optional[int]:
lowerCAmelCase__ : List[Any] = []
# deal with critical env vars
lowerCAmelCase__ : Optional[Any] = ["""CUDA_VISIBLE_DEVICES"""]
for key in env_keys:
lowerCAmelCase__ : int = os.environ.get(__UpperCAmelCase , __UpperCAmelCase )
if val is not None:
cmd.append(f"""{key}={val}""" )
# python executable (not always needed if the script is executable)
lowerCAmelCase__ : List[Any] = sys.executable if full_python_path else sys.executable.split("""/""" )[-1]
cmd.append(__UpperCAmelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
lowerCAmelCase__ : List[Any] = []
lowerCAmelCase__ : List[Any] = """"""
while len(__UpperCAmelCase ) > 0:
current_line += f"""{cmd.pop(0 )} """
if len(__UpperCAmelCase ) == 0 or len(__UpperCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = """"""
return "\\\n".join(__UpperCAmelCase )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
# unwrap multi-line input
lowerCAmelCase__ : Optional[Any] = re.sub(R"""[\\\n]+""" , """ """ , args.base_cmd )
# remove --output_dir if any and set our own
lowerCAmelCase__ : Union[str, Any] = re.sub(r"""--output_dir\s+[^\s]+""" , """""" , args.base_cmd )
args.base_cmd += f""" --output_dir {output_dir}"""
# ensure we have --overwrite_output_dir
lowerCAmelCase__ : str = re.sub(r"""--overwrite_output_dir\s+""" , """""" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , )
lowerCAmelCase__ : Optional[int] = subprocess.run(__UpperCAmelCase , capture_output=__UpperCAmelCase , text=__UpperCAmelCase )
if verbose:
print("""STDOUT""" , result.stdout )
print("""STDERR""" , result.stderr )
# save the streams
lowerCAmelCase__ : Optional[int] = variation.replace(""" """ , """-""" )
with open(Path(__UpperCAmelCase ) / f"""log.{prefix}.stdout.txt""" , """w""" ) as f:
f.write(result.stdout )
with open(Path(__UpperCAmelCase ) / f"""log.{prefix}.stderr.txt""" , """w""" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("""failed""" )
return {target_metric_key: nan}
with io.open(f"""{output_dir}/all_results.json""" , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase__ : Union[str, Any] = json.load(__UpperCAmelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Tuple:
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Optional[int] = f"""{id}: {variation:<{longest_variation_len}}"""
lowerCAmelCase__ : Dict = f"""{preamble}: """
lowerCAmelCase__ : int = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(__UpperCAmelCase ) , desc=__UpperCAmelCase , leave=__UpperCAmelCase ):
lowerCAmelCase__ : List[str] = process_run_single(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : str = single_run_metrics[target_metric_key]
if not math.isnan(__UpperCAmelCase ):
metrics.append(__UpperCAmelCase )
results.append(__UpperCAmelCase )
outcome += "✓"
else:
outcome += "✘"
lowerCAmelCase__ : str = f"""\33[2K\r{outcome}"""
if len(__UpperCAmelCase ) > 0:
lowerCAmelCase__ : Dict = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
lowerCAmelCase__ : Optional[Any] = round(mean_metrics[target_metric_key] , 2 )
lowerCAmelCase__ : List[Any] = f"""{outcome} {mean_target}"""
if len(__UpperCAmelCase ) > 1:
results_str += f""" {tuple(round(x , 2 ) for x in results )}"""
print(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = variation
return mean_metrics
else:
print(__UpperCAmelCase )
return {variation_key: variation, target_metric_key: nan}
def lowercase_ ( ) -> Any:
lowerCAmelCase__ : int = torch.cuda.get_device_properties(torch.device("""cuda""" ) )
return f"""
Datetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
lowerCAmelCase__ : Union[str, Any] = pd.DataFrame(__UpperCAmelCase )
lowerCAmelCase__ : int = """variation"""
lowerCAmelCase__ : Any = """diff_%"""
lowerCAmelCase__ : int = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
lowerCAmelCase__ : Dict = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(__UpperCAmelCase ):
# as a fallback, use the minimal value as the sentinel
lowerCAmelCase__ : Any = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(__UpperCAmelCase ):
lowerCAmelCase__ : List[str] = df.apply(
lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="""columns""" , )
# re-order columns
lowerCAmelCase__ : Tuple = [variation_key, target_metric_key, diff_key, *report_metric_keys]
lowerCAmelCase__ : List[Any] = df.reindex(__UpperCAmelCase , axis="""columns""" ) # reorder cols
# capitalize
lowerCAmelCase__ : str = df.rename(str.capitalize , axis="""columns""" )
# make the cols as narrow as possible
lowerCAmelCase__ : Dict = df.rename(lambda c : c.replace("""_""" , """<br>""" ) , axis="""columns""" )
lowerCAmelCase__ : Optional[int] = df.rename(lambda c : c.replace("""_""" , """\n""" ) , axis="""columns""" )
lowerCAmelCase__ : Dict = ["""""", """Copy between the cut-here-lines and paste as is to github or a forum"""]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=__UpperCAmelCase , floatfmt=""".2f""" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=__UpperCAmelCase , floatfmt=""".2f""" )]
print("""\n\n""".join(__UpperCAmelCase ) )
def lowercase_ ( ) -> Dict:
lowerCAmelCase__ : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--base-cmd""" , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help="""Base cmd""" , )
parser.add_argument(
"""--variations""" , default=__UpperCAmelCase , type=__UpperCAmelCase , nargs="""+""" , required=__UpperCAmelCase , help="""Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'""" , )
parser.add_argument(
"""--base-variation""" , default=__UpperCAmelCase , type=__UpperCAmelCase , help="""Baseline variation to compare to. if None the minimal target value will be used to compare against""" , )
parser.add_argument(
"""--target-metric-key""" , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help="""Target metric key in output_dir/all_results.json, e.g., train_samples_per_second""" , )
parser.add_argument(
"""--report-metric-keys""" , default="""""" , type=__UpperCAmelCase , help="""Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples""" , )
parser.add_argument(
"""--repeat-times""" , default=1 , type=__UpperCAmelCase , help="""How many times to re-run each variation - an average will be reported""" , )
parser.add_argument(
"""--output_dir""" , default="""output_benchmark""" , type=__UpperCAmelCase , help="""The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked""" , )
parser.add_argument(
"""--verbose""" , default=__UpperCAmelCase , action="""store_true""" , help="""Whether to show the outputs of each run or just the benchmark progress""" , )
lowerCAmelCase__ : int = parser.parse_args()
lowerCAmelCase__ : Any = args.output_dir
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
lowerCAmelCase__ : int = get_base_command(__UpperCAmelCase , __UpperCAmelCase )
# split each dimension into its --foo variations
lowerCAmelCase__ : int = [list(map(str.strip , re.split(R"""\|""" , x ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
lowerCAmelCase__ : Any = list(map(str.strip , map(""" """.join , itertools.product(*__UpperCAmelCase ) ) ) )
lowerCAmelCase__ : Optional[int] = max(len(x ) for x in variations )
# split wanted keys
lowerCAmelCase__ : Union[str, Any] = args.report_metric_keys.split()
# capture prints into a log file for convenience
lowerCAmelCase__ : Dict = f"""benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"""
print(f"""\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt""" )
print(f"""and this script's output is also piped into {report_fn}""" )
lowerCAmelCase__ : Optional[int] = Tee(__UpperCAmelCase )
print(f"""\n*** Running {len(__UpperCAmelCase )} benchmarks:""" )
print(f"""Base command: {' '.join(__UpperCAmelCase )}""" )
lowerCAmelCase__ : Any = """variation"""
lowerCAmelCase__ : Optional[Any] = []
for id, variation in enumerate(tqdm(__UpperCAmelCase , desc="""Total completion: """ , leave=__UpperCAmelCase ) ):
lowerCAmelCase__ : str = base_cmd + variation.split()
results.append(
process_run(
id + 1 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , args.target_metric_key , __UpperCAmelCase , args.repeat_times , __UpperCAmelCase , args.verbose , ) )
process_results(__UpperCAmelCase , args.target_metric_key , __UpperCAmelCase , args.base_variation , __UpperCAmelCase )
if __name__ == "__main__":
main()
| 242 |
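# Sketch of the variation expansion described in the header comments: each
# --variations entry is split on '|' into one dimension, and the benchmark
# runs the cartesian product of all dimensions (6 runs in the 2x3 example).
import itertools
import re

variations_args = ["--tf32 0|--tf32 1", "|--fp16|--bf16"]
dims = [[v.strip() for v in re.split(r"\|", dim)] for dim in variations_args]
variations = [" ".join(combo).strip() for combo in itertools.product(*dims)]
print(variations)
# -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']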
"""simple docstring"""
import math
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
lowerCAmelCase__ : Any = len(__UpperCAmelCase )
lowerCAmelCase__ : int = int(math.floor(math.sqrt(__UpperCAmelCase ) ) )
lowerCAmelCase__ : Optional[int] = 0
while arr[min(__UpperCAmelCase , __UpperCAmelCase ) - 1] < x:
lowerCAmelCase__ : Any = step
step += int(math.floor(math.sqrt(__UpperCAmelCase ) ) )
if prev >= n:
return -1
while arr[prev] < x:
lowerCAmelCase__ : List[Any] = prev + 1
if prev == min(__UpperCAmelCase , __UpperCAmelCase ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
_A = input("""Enter numbers separated by a comma:\n""").strip()
_A = [int(item) for item in user_input.split(""",""")]
_A = int(input("""Enter the number to be searched:\n"""))
_A = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(f"""Number {x} is at index {res}""")
| 242 | 1 |
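# A self-contained restatement of the jump search above with readable names:
# hop over blocks of ~sqrt(n) elements, then linearly scan the single block
# that may contain x. Roughly O(sqrt(n)) comparisons on a sorted array.
import math

def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.sqrt(n))
    prev = 0
    while prev < n and arr[min(prev + step, n) - 1] < x:
        prev += step
        if prev >= n:
            return -1
    for i in range(prev, min(prev + step, n)):
        if arr[i] == x:
            return i
    return -1

arr = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
assert jump_search(arr, 21) == 8   # found at index 8
assert jump_search(arr, 4) == -1   # absent values return -1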
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( point_a , point_b ) -> List[Any]:
return (point_a[0] - point_b[0]) ** 2 + (point_a[1] - point_b[1]) ** 2
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=0 ) -> Optional[int]:
return sorted(__UpperCAmelCase , key=lambda x : x[column] )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=float('''inf''' ) ) -> int:
for i in range(points_counts - 1 ):
for j in range(i + 1 , __UpperCAmelCase ):
lowercase__: Dict = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
lowercase__: List[Any] = current_dis
return min_dis
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=float('''inf''' ) ) -> int:
for i in range(min(6 , points_counts - 1 ) , __UpperCAmelCase ):
for j in range(max(0 , i - 6 ) , __UpperCAmelCase ):
lowercase__: Union[str, Any] = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
lowercase__: Any = current_dis
return min_dis
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
if points_counts <= 3:
return dis_between_closest_pair(__UpperCAmelCase , __UpperCAmelCase )
# recursion
lowercase__: int = points_counts // 2
lowercase__: List[Any] = closest_pair_of_points_sqr(
__UpperCAmelCase , points_sorted_on_y[:mid] , __UpperCAmelCase )
lowercase__: Dict = closest_pair_of_points_sqr(
__UpperCAmelCase , points_sorted_on_y[mid:] , points_counts - mid )
lowercase__: List[Any] = min(__UpperCAmelCase , __UpperCAmelCase )
lowercase__: Optional[int] = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(__UpperCAmelCase )
lowercase__: str = dis_between_closest_in_strip(
__UpperCAmelCase , len(__UpperCAmelCase ) , __UpperCAmelCase )
return min(__UpperCAmelCase , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
lowercase__: Optional[int] = column_based_sort(__UpperCAmelCase , column=0 )
lowercase__: str = column_based_sort(__UpperCAmelCase , column=1 )
return (
closest_pair_of_points_sqr(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
) ** 0.5
if __name__ == "__main__":
__A = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 353 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = "ctrl"
_UpperCAmelCase :int = ["past_key_values"]
_UpperCAmelCase :Dict = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _UpperCAmelCase=246534 , _UpperCAmelCase=256 , _UpperCAmelCase=1280 , _UpperCAmelCase=8192 , _UpperCAmelCase=48 , _UpperCAmelCase=16 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
lowercase__: Union[str, Any] = vocab_size
lowercase__: Optional[int] = n_positions
lowercase__: Optional[int] = n_embd
lowercase__: Any = n_layer
lowercase__: Any = n_head
lowercase__: int = dff
lowercase__: Dict = resid_pdrop
lowercase__: Any = embd_pdrop
lowercase__: Any = layer_norm_epsilon
lowercase__: Optional[int] = initializer_range
lowercase__: Dict = use_cache
super().__init__(**_UpperCAmelCase )
| 2 | 0 |
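# A brute-force oracle for the divide-and-conquer closest pair above: O(n^2),
# but useful for cross-checking small inputs. Like the *_sqr helpers in the
# snippet, it compares squared distances and takes one sqrt at the end.
from itertools import combinations

def closest_pair_brute_force(points: list) -> float:
    best_sqr = min(
        (p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2 for p, q in combinations(points, 2)
    )
    return best_sqr ** 0.5

points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print(closest_pair_brute_force(points))  # ~1.4142, the pair (2, 3)-(3, 4)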
'''simple docstring'''
from timeit import timeit
UpperCamelCase_ = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def _UpperCAmelCase ( _lowerCamelCase : str ) -> bool:
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def _UpperCAmelCase ( _lowerCamelCase : str ) -> bool:
_lowerCAmelCase : Union[str, Any] = len(_lowerCamelCase ) // 2
_lowerCAmelCase : int = len(_lowerCamelCase )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(_lowerCamelCase ) )
def _UpperCAmelCase ( _lowerCamelCase : str ) -> bool:
if len(_lowerCamelCase ) <= 2:
return True
if s[0] == s[len(_lowerCamelCase ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def _UpperCAmelCase ( _lowerCamelCase : str ) -> bool:
return s == s[::-1]
def _UpperCAmelCase ( _lowerCamelCase : str ) -> None:
_lowerCAmelCase : Any = f'all({name}(key) is value for key, value in test_data.items())'
_lowerCAmelCase : Optional[int] = f'from __main__ import test_data, {name}'
_lowerCAmelCase : int = 500_000
_lowerCAmelCase : Dict = timeit(stmt=_lowerCamelCase , setup=_lowerCamelCase , number=_lowerCamelCase )
print(f'{name:<35} finished {number:,} runs in {result:.5f} seconds' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F'{key:21} {value}')
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
| 309 |
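# Why slicing wins in the timings above: s[::-1] and the == comparison both
# run at C speed inside CPython, while the explicit loops pay per-character
# interpreter overhead. A quick standalone reproduction:
from timeit import timeit

s = "amanaplanacanalpanama"
print(timeit(lambda: s == s[::-1], number=1_000_000))  # slice version
print(timeit(lambda: all(s[i] == s[-i - 1] for i in range(len(s) // 2)), number=1_000_000))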
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCamelCase_ = logging.get_logger(__name__)
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = question_encoder
_lowerCAmelCase : Optional[Any] = generator
_lowerCAmelCase : Optional[Any] = self.question_encoder
def __UpperCamelCase ( self , snake_case_ ):
if os.path.isfile(snake_case_ ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
_lowerCAmelCase : Any = os.path.join(snake_case_ , """question_encoder_tokenizer""" )
_lowerCAmelCase : Tuple = os.path.join(snake_case_ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(snake_case_ )
self.generator.save_pretrained(snake_case_ )
@classmethod
def __UpperCamelCase ( cls , snake_case_ , **snake_case_ ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_lowerCAmelCase : Dict = kwargs.pop("""config""" , snake_case_ )
if config is None:
_lowerCAmelCase : List[Any] = RagConfig.from_pretrained(snake_case_ )
_lowerCAmelCase : int = AutoTokenizer.from_pretrained(
snake_case_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
snake_case_ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=snake_case_ , generator=snake_case_ )
def __call__( self , *snake_case_ , **snake_case_ ):
return self.current_tokenizer(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.batch_decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.question_encoder
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.generator
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = "longest" , snake_case_ = None , snake_case_ = True , **snake_case_ , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , snake_case_ , )
if max_length is None:
_lowerCAmelCase : Any = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[Any] = self(
snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , max_length=snake_case_ , padding=snake_case_ , truncation=snake_case_ , **snake_case_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_lowerCAmelCase : List[str] = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[str] = self(
text_target=snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , padding=snake_case_ , max_length=snake_case_ , truncation=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Dict = labels["""input_ids"""]
return model_inputs
| 309 | 1 |
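# The deprecation warning above points at the modern replacement for
# prepare_seq2seq_batch: pass targets through text_target in a single
# tokenizer call, which fills batch["labels"]. A hedged sketch (the
# checkpoint name is illustrative and downloading it requires network access):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")
batch = tokenizer(
    ["translate English to German: Hello"],  # source texts
    text_target=["Hallo"],                   # target texts -> batch["labels"]
    padding="longest",
    return_tensors="pt",
)
print(batch["input_ids"].shape, batch["labels"].shape)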
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
lowerCAmelCase : Dict =logging.get_logger(__name__)
class a_ ( _lowerCAmelCase ):
def __init__( self : Tuple , *lowercase : Dict , **lowercase : List[Any] ):
"""simple docstring"""
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , lowercase , )
super().__init__(*lowercase , **lowercase )
| 361 |
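# The GLPNFeatureExtractor above is a thin deprecation shim: it subclasses the
# replacement class and emits a FutureWarning on construction. The generic
# pattern, sketched with illustrative class names:
import warnings

class NewProcessor:
    def __init__(self, size=384):
        self.size = size

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated and will be removed; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)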
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
@add_end_docstrings(_lowerCAmelCase )
class a_ ( _lowerCAmelCase ):
def __init__( self : List[Any] , **lowercase : Optional[int] ):
"""simple docstring"""
super().__init__(**lowercase )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , "vision" )
self.check_model_type(lowercase )
def __call__( self : Tuple , lowercase : Union[str, "Image.Image", List[Dict[str, Any]]] , lowercase : Union[str, List[str]] = None , **lowercase : str , ):
"""simple docstring"""
if "text_queries" in kwargs:
lowercase_ :List[Any] = kwargs.pop("text_queries" )
if isinstance(lowercase , (str, Image.Image) ):
lowercase_ :List[str] = {"image": image, "candidate_labels": candidate_labels}
else:
lowercase_ :Optional[Any] = image
lowercase_ :str = super().__call__(lowercase , **lowercase )
return results
def lowercase__ ( self : Optional[int] , **lowercase : List[str] ):
"""simple docstring"""
lowercase_ :Tuple = {}
if "threshold" in kwargs:
lowercase_ :Dict = kwargs["threshold"]
if "top_k" in kwargs:
lowercase_ :Optional[Any] = kwargs["top_k"]
return {}, {}, postprocess_params
def lowercase__ ( self : List[Any] , lowercase : Dict ):
"""simple docstring"""
lowercase_ :Any = load_image(inputs["image"] )
lowercase_ :List[str] = inputs["candidate_labels"]
if isinstance(lowercase , lowercase ):
lowercase_ :Union[str, Any] = candidate_labels.split("," )
lowercase_ :Tuple = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
for i, candidate_label in enumerate(lowercase ):
lowercase_ :Union[str, Any] = self.tokenizer(lowercase , return_tensors=self.framework )
lowercase_ :Tuple = self.image_processor(lowercase , return_tensors=self.framework )
yield {
"is_last": i == len(lowercase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowercase__ ( self : List[str] , lowercase : List[str] ):
"""simple docstring"""
lowercase_ :Dict = model_inputs.pop("target_size" )
lowercase_ :str = model_inputs.pop("candidate_label" )
lowercase_ :List[Any] = model_inputs.pop("is_last" )
lowercase_ :Optional[Any] = self.model(**lowercase )
lowercase_ :str = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def lowercase__ ( self : Optional[int] , lowercase : List[str] , lowercase : List[str]=0.1 , lowercase : Optional[int]=None ):
"""simple docstring"""
lowercase_ :Dict = []
for model_output in model_outputs:
lowercase_ :int = model_output["candidate_label"]
lowercase_ :str = BaseModelOutput(lowercase )
lowercase_ :List[Any] = self.image_processor.post_process_object_detection(
outputs=lowercase , threshold=lowercase , target_sizes=model_output["target_size"] )[0]
for index in outputs["scores"].nonzero():
lowercase_ :Optional[int] = outputs["scores"][index].item()
lowercase_ :int = self._get_bounding_box(outputs["boxes"][index][0] )
lowercase_ :int = {"score": score, "label": label, "box": box}
results.append(lowercase )
lowercase_ :Dict = sorted(lowercase , key=lambda x : x["score"] , reverse=lowercase )
if top_k:
lowercase_ :List[str] = results[:top_k]
return results
def lowercase__ ( self : Union[str, Any] , lowercase : "torch.Tensor" ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
lowercase_ , lowercase_ , lowercase_ , lowercase_ :List[str] = box.int().tolist()
lowercase_ :List[Any] = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 147 | 0 |
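# A hedged usage sketch for the zero-shot object detection pipeline defined
# above. The checkpoint name is an assumption (any OWL-ViT checkpoint should
# work), and running this needs torch, PIL, and network access.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for pred in predictions:
    # Each entry: {"score": float, "label": str, "box": {xmin, ymin, xmax, ymax}}
    print(pred["label"], round(pred["score"], 3), pred["box"])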
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCAmelCase = None
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCAmelCase = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : int = VOCAB_FILES_NAMES
_snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : str = ['''input_ids''', '''attention_mask''']
_snake_case : List[Any] = TaTokenizer
_snake_case : List[int] = []
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase=1_0_0 , _UpperCamelCase=None , **_UpperCamelCase , ) -> str:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase_ : List[str] = [f"<extra_id_{i}>" for i in range(_UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
UpperCAmelCase_ : Tuple = len(set(filter(lambda _UpperCamelCase : bool('extra_id_' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , **_UpperCamelCase , )
UpperCAmelCase_ : Any = vocab_file
UpperCAmelCase_ : Tuple = False if not self.vocab_file else True
UpperCAmelCase_ : str = extra_ids
@staticmethod
def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
UpperCAmelCase_ : Any = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
f" {pretrained_model_name_or_path} automatically truncating your input to"
f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , _UpperCamelCase , )
return max_model_length
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_UpperCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase_ : int = os.path.join(
_UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ):
copyfile(self.vocab_file , _UpperCamelCase )
logger.info(f"Copy vocab file to {out_vocab_file}" )
return (out_vocab_file,)
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : List[str] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
UpperCAmelCase_ : int = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : Any = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __UpperCAmelCase ( self ) -> Tuple:
return list(
set(filter(lambda _UpperCamelCase : bool(re.search(r'<extra_id_\d+>' , _UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def __UpperCAmelCase ( self ) -> str:
return [self.convert_tokens_to_ids(_UpperCamelCase ) for token in self.get_sentinel_tokens()]
| 29 | """simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _A :
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowercase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowercase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
lowercase = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , )
torch.manual_seed(0 )
lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = self.get_dummy_inputs(__lowerCAmelCase )
lowercase = inputs["""prompt"""]
lowercase = inputs["""generator"""]
lowercase = inputs["""num_inference_steps"""]
lowercase = inputs["""output_type"""]
if "image" in inputs:
lowercase = inputs["""image"""]
else:
lowercase = None
if "mask_image" in inputs:
lowercase = inputs["""mask_image"""]
else:
lowercase = None
if "original_image" in inputs:
lowercase = inputs["""original_image"""]
else:
lowercase = None
lowercase , lowercase = pipe.encode_prompt(__lowerCAmelCase )
# inputs with prompt converted to embeddings
lowercase = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
lowercase = image
if mask_image is not None:
lowercase = mask_image
if original_image is not None:
lowercase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase = pipe(**__lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowerCAmelCase )
lowercase = self.pipeline_class.from_pretrained(__lowerCAmelCase )
pipe_loaded.to(__lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__lowerCAmelCase , __lowerCAmelCase ) is None , f'`{optional_component}` did not stay set to None after loading.' , )
lowercase = self.get_dummy_inputs(__lowerCAmelCase )
lowercase = inputs["""generator"""]
lowercase = inputs["""num_inference_steps"""]
lowercase = inputs["""output_type"""]
# inputs with prompt converted to embeddings
lowercase = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
lowercase = image
if mask_image is not None:
lowercase = mask_image
if original_image is not None:
lowercase = original_image
lowercase = pipe_loaded(**__lowerCAmelCase )[0]
lowercase = np.abs(to_np(__lowerCAmelCase ) - to_np(__lowerCAmelCase ) ).max()
self.assertLess(__lowerCAmelCase , 1E-4 )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = self.get_dummy_inputs(__lowerCAmelCase )
lowercase = pipe(**__lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowerCAmelCase )
lowercase = self.pipeline_class.from_pretrained(__lowerCAmelCase )
pipe_loaded.to(__lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowercase = self.get_dummy_inputs(__lowerCAmelCase )
lowercase = pipe_loaded(**__lowerCAmelCase )[0]
lowercase = np.abs(to_np(__lowerCAmelCase ) - to_np(__lowerCAmelCase ) ).max()
self.assertLess(__lowerCAmelCase , 1E-4 )
| 197 | 0 |
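# The extra_ids machinery in the T5 tokenizer earlier in this row injects
# sentinel tokens <extra_id_0> ... <extra_id_99> used by T5's span-corruption
# objective. A quick sketch (downloading t5-small requires network access):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")  # 100 extra_ids by default
print(tokenizer.convert_tokens_to_ids("<extra_id_0>"))  # sentinels sit at the top of the vocab
ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park").input_ids
print(tokenizer.convert_ids_to_tokens(ids))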
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : List[Any] = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
_UpperCAmelCase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def UpperCAmelCase__ ( lowerCamelCase ):
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
lowercase :int = model_type_to_module_name(_A )
lowercase :List[str] = importlib.import_module(F".{module_name}", "transformers.models" )
try:
return getattr(_A, _A )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_A, "__name__", _A ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowercase :List[Any] = importlib.import_module("transformers" )
if hasattr(_A, _A ):
return getattr(_A, _A )
return None
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, lowerCamelCase = False, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, **lowerCamelCase, ):
lowercase :Dict = get_file_from_repo(
_A, _A, cache_dir=_A, force_download=_A, resume_download=_A, proxies=_A, use_auth_token=_A, revision=_A, local_files_only=_A, )
if resolved_config_file is None:
logger.info(
"Could not locate the feature extractor configuration file, will try to use the model config instead." )
return {}
with open(_A, encoding="utf-8" ) as reader:
return json.load(_A )
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.")

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            F"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            F"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}")

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
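# A minimal usage sketch of the class above; the checkpoint name is only an
# illustrative example, not taken from this file:
#
#     from transformers import AutoFeatureExtractor
#
#     feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#     features = feature_extractor([0.0] * 16000, sampling_rate=16000, return_tensors="pt")
#
# Resolution order: `feature_extractor_type` in the preprocessor config, then the
# `auto_map` / dynamic-module path, then the model config's type via FEATURE_EXTRACTOR_MAPPING.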
| 355 |
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the 1-indexed position of the highest set bit of `number`.

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 158 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.)")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
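# A minimal usage sketch; the checkpoint below is only an illustrative example:
#
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     inputs = processor(audio=waveform, sampling_rate=16000, text="transcript", return_tensors="pt")
#
# When both `audio` and `text` are given, the tokenized text is attached as `inputs["labels"]`.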
| 79 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images, one per (3, 30, 400) random uint8 array."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
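# Note: the processor simply composes its two sub-components, so the checks above only
# need to compare against the standalone tokenizer / image-processor outputs.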
| 237 | 0 |
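# Recursive binary search over a sorted list: each call halves the candidate window,
# so membership is decided in O(log n) comparisons (the slices below trade O(n)
# copying for readability).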
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}") | 357 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCamelCase : Optional[Any] =False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # This is an audio of a dog
        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 | 196 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}
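# BridgeTower splits its configuration into a vision config, a text config, and a
# top-level config that nests both, mirroring the CLIP-style composite-config pattern.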
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
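# A minimal composition sketch (all values shown are the defaults above):
#
#     text_config = BridgeTowerTextConfig()
#     vision_config = BridgeTowerVisionConfig()
#     config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
#     assert config.to_dict()["text_config"]["vocab_size"] == 50265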
| 60 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class A_ :
def __init__( self : List[Any] , snake_case_ : int , snake_case_ : Dict=1_6 , snake_case_ : Dict=1_3 , snake_case_ : int=7 , snake_case_ : Any=1_4 , snake_case_ : int=1_0 , snake_case_ : Any=1_9 , snake_case_ : int=5 , snake_case_ : Any=4 , snake_case_ : Tuple=True , snake_case_ : Optional[int]=1_6 , snake_case_ : List[str]=2 , snake_case_ : Any=4 , snake_case_ : List[Any]=4 , snake_case_ : Optional[Any]="gelu" , snake_case_ : Optional[int]=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Tuple=[1, 2, 3, 4, 5] , snake_case_ : str=2_5 , snake_case_ : Any=5 , ):
_UpperCAmelCase = d_model
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = prediction_length
_UpperCAmelCase = context_length
_UpperCAmelCase = cardinality
_UpperCAmelCase = num_time_features
_UpperCAmelCase = lags_sequence
_UpperCAmelCase = embedding_dimension
_UpperCAmelCase = is_training
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = context_length
_UpperCAmelCase = prediction_length + label_length
_UpperCAmelCase = label_length
_UpperCAmelCase = moving_average
_UpperCAmelCase = autocorrelation_factor
def lowercase ( self : Union[str, Any] ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowercase ( self : int , snake_case_ : Optional[Any] ):
_UpperCAmelCase = config.context_length + max(config.lags_sequence )
_UpperCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_UpperCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_UpperCAmelCase = floats_tensor([self.batch_size, _past_length] )
_UpperCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length] )
_UpperCAmelCase = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def lowercase ( self : List[Any] ):
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = self.prepare_autoformer_inputs_dict(snake_case_ )
return config, inputs_dict
def lowercase ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase ( self : Optional[Any] , snake_case_ : int , snake_case_ : Optional[int] ):
_UpperCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval()
_UpperCAmelCase = model(**snake_case_ )
_UpperCAmelCase = outputs.encoder_last_hidden_state
_UpperCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = model.get_encoder()
encoder.save_pretrained(snake_case_ )
_UpperCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = model.create_network_inputs(**snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_UpperCAmelCase = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_UpperCAmelCase = encoder(inputs_embeds=snake_case_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
_UpperCAmelCase = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_UpperCAmelCase = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_UpperCAmelCase = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_UpperCAmelCase = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = model.get_decoder()
decoder.save_pretrained(snake_case_ )
_UpperCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ )
_UpperCAmelCase = decoder(
trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
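# The check above exercises Autoformer's series decomposition: the context window is split
# into seasonal and trend parts, the decoder is seeded with the trailing `label_length`
# steps of each, and the standalone encoder/decoder must reproduce the full model's
# hidden states to within 1e-3.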
@require_torch
class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_lowerCamelCase : List[Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_lowerCamelCase : Tuple = (AutoformerForPrediction,) if is_torch_available() else ()
_lowerCamelCase : List[Any] = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Tuple = False
_lowerCamelCase : int = False
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : List[Any] = False
def lowercase ( self : Tuple ):
_UpperCAmelCase = AutoformerModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertEqual(info["missing_keys"] , [] )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ )
@unittest.skip(reason="Model has no tokens embeddings" )
def lowercase ( self : Optional[int] ):
pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
def lowercase ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
_UpperCAmelCase = getattr(self.model_tester , "seq_length" , snake_case_ )
_UpperCAmelCase = getattr(self.model_tester , "decoder_seq_length" , snake_case_ )
_UpperCAmelCase = getattr(self.model_tester , "encoder_seq_length" , snake_case_ )
_UpperCAmelCase = getattr(self.model_tester , "d_model" , snake_case_ )
_UpperCAmelCase = getattr(self.model_tester , "num_attention_heads" , snake_case_ )
_UpperCAmelCase = d_model // num_attention_heads
for model_class in self.all_model_classes:
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_UpperCAmelCase = outputs.encoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_UpperCAmelCase = len(snake_case_ )
_UpperCAmelCase = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case_ , snake_case_ )
# decoder attentions
_UpperCAmelCase = outputs.decoder_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_UpperCAmelCase = outputs.cross_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + 2 , len(snake_case_ ) )
_UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowercase ( self : Dict ):
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
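# The integration tests below run the pretrained "huggingface/autoformer-tourism-monthly"
# checkpoint against these cached batches and compare outputs to recorded slices.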
@require_torch
@slow
class A_ ( unittest.TestCase ):
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_UpperCAmelCase = prepare_batch()
with torch.no_grad():
_UpperCAmelCase = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
_UpperCAmelCase = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case_ )
_UpperCAmelCase = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_UpperCAmelCase = prepare_batch("val-batch.pt" )
with torch.no_grad():
_UpperCAmelCase = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
_UpperCAmelCase = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case_ )
_UpperCAmelCase = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowercase ( self : Tuple ):
_UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_UpperCAmelCase = prepare_batch("val-batch.pt" )
with torch.no_grad():
_UpperCAmelCase = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
_UpperCAmelCase = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case_ )
_UpperCAmelCase = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=snake_case_ )
_UpperCAmelCase = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
| 22 | 0 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
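# Distributed smoke test for `split_dataset_by_node`: every rank must receive a disjoint,
# near-equal share of the NUM_SHARDS * NUM_ITEMS_PER_SHARD generated examples, in both
# streaming and map-style modes.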
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [F"""shard_{shard_idx}""" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(F"""local_size {local_size} != expected_local_size {expected_local_size}""")
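# Typically launched with one process per rank so RANK/WORLD_SIZE are set, e.g.
# (illustrative command, not part of this file):
#   torchrun --nproc_per_node=2 this_script.py --streaming True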
if __name__ == "__main__":
main()
| 209 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
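# The fast tests below assemble the AltDiffusion img2img pipeline from tiny
# randomly-initialized components (UNet, VAE, XLM-R series text encoder) so the
# pipeline can run end-to-end on CPU in a few seconds.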
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : Optional[Any] =1
lowerCamelCase_ : Union[str, Any] =3
lowerCamelCase_ : Dict =(32, 32)
lowerCamelCase_ : List[Any] =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case__ )
return image
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
lowerCamelCase_ : Dict =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def UpperCAmelCase__ ( self : List[Any] ):
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
torch.manual_seed(0 )
lowerCamelCase_ : Any =RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(snake_case__ )
@property
def UpperCAmelCase__ ( self : int ):
def extract(*snake_case__ : Dict , **snake_case__ : int ):
class lowercase__ :
def __init__( self : Optional[Any] ):
lowerCamelCase_ : Union[str, Any] =torch.ones([0] )
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Any ):
self.pixel_values.to(snake_case__ )
return self
return Out()
return extract
def UpperCAmelCase__ ( self : Optional[Any] ):
lowerCamelCase_ : Dict ="cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : Dict =self.dummy_cond_unet
lowerCamelCase_ : List[str] =PNDMScheduler(skip_prk_steps=snake_case__ )
lowerCamelCase_ : List[Any] =self.dummy_vae
lowerCamelCase_ : Any =self.dummy_text_encoder
lowerCamelCase_ : List[Any] =XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowerCamelCase_ : List[str] =77
lowerCamelCase_ : Optional[int] =self.dummy_image.to(snake_case__ )
lowerCamelCase_ : Any =init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : List[Any] =AltDiffusionImgaImgPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : int =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=snake_case__ )
lowerCamelCase_ : Optional[int] =alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Any ="A painting of a squirrel eating a burger"
lowerCamelCase_ : Union[str, Any] =torch.Generator(device=snake_case__ ).manual_seed(0 )
lowerCamelCase_ : str =alt_pipe(
[prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=snake_case__ , )
lowerCamelCase_ : List[Any] =output.images
lowerCamelCase_ : str =torch.Generator(device=snake_case__ ).manual_seed(0 )
lowerCamelCase_ : Optional[Any] =alt_pipe(
[prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=snake_case__ , return_dict=snake_case__ , )[0]
lowerCamelCase_ : Tuple =image[0, -3:, -3:, -1]
lowerCamelCase_ : Union[str, Any] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ : List[str] =np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : Dict =self.dummy_cond_unet
lowerCamelCase_ : Any =PNDMScheduler(skip_prk_steps=snake_case__ )
lowerCamelCase_ : Tuple =self.dummy_vae
lowerCamelCase_ : Union[str, Any] =self.dummy_text_encoder
lowerCamelCase_ : Optional[Any] =XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowerCamelCase_ : Tuple =77
lowerCamelCase_ : str =self.dummy_image.to(snake_case__ )
# put models in fp16
lowerCamelCase_ : Optional[Any] =unet.half()
lowerCamelCase_ : Dict =vae.half()
lowerCamelCase_ : str =bert.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Optional[int] =AltDiffusionImgaImgPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : List[Any] =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=snake_case__ )
lowerCamelCase_ : int =alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Tuple ="A painting of a squirrel eating a burger"
lowerCamelCase_ : Tuple =torch.manual_seed(0 )
lowerCamelCase_ : List[Any] =alt_pipe(
[prompt] , generator=snake_case__ , num_inference_steps=2 , output_type="np" , image=snake_case__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ : int =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ : Any =init_image.resize((760, 504) )
lowerCamelCase_ : List[str] ="BAAI/AltDiffusion"
lowerCamelCase_ : List[str] =AltDiffusionImgaImgPipeline.from_pretrained(
snake_case__ , safety_checker=snake_case__ , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowerCamelCase_ : Optional[int] ="A fantasy landscape, trending on artstation"
lowerCamelCase_ : List[Any] =torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] =pipe(
prompt=snake_case__ , image=snake_case__ , strength=0.75 , guidance_scale=7.5 , generator=snake_case__ , output_type="np" , )
lowerCamelCase_ : Optional[int] =output.images[0]
lowerCamelCase_ : Tuple =image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowerCamelCase_ : Optional[int] =np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : str =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowerCamelCase_ : Any =init_image.resize((768, 512) )
lowerCamelCase_ : Optional[int] =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowerCamelCase_ : Dict ="BAAI/AltDiffusion"
lowerCamelCase_ : List[str] =AltDiffusionImgaImgPipeline.from_pretrained(
snake_case__ , safety_checker=snake_case__ , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowerCamelCase_ : Optional[Any] ="A fantasy landscape, trending on artstation"
lowerCamelCase_ : Dict =torch.manual_seed(0 )
lowerCamelCase_ : int =pipe(
prompt=snake_case__ , image=snake_case__ , strength=0.75 , guidance_scale=7.5 , generator=snake_case__ , output_type="np" , )
lowerCamelCase_ : int =output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 209 | 1 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def b2mb(x):
    # Convert a byte count to whole mebibytes.
    return int(x / 2**20)


class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
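# Usage: `with TorchTracemalloc() as tm: ...`, then read `tm.used` / `tm.peaked`
# (MiB deltas relative to the allocation level at entry).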
def get_dataloaders(accelerator, batch_size=16, model_name='bert-base-cased', n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        'glue', 'mrpc', split={'train': f'train[:{n_train}]', 'validation': f'validation[:{n_val}]'})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)

    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(b2mb(tracemalloc.begin)))
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used))
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked))
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)))
        train_total_peak_memory[f'epoch-{epoch}'] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f'epoch-{epoch}'] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'peak_memory_utilization.json'), 'w') as f:
            json.dump(train_total_peak_memory, f)
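# CLI entry point: parses the model name, output directory, example counts, and an
# optional peak-memory upper bound, then runs the training loop above.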
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased',
        help='Path to pretrained model or model identifier from huggingface.co/models.', required=False)
    parser.add_argument(
        '--output_dir', type=str, default='.',
        help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.')
    parser.add_argument(
        '--peak_memory_upper_bound', type=float, default=None,
        help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.')
    parser.add_argument(
        '--n_train', type=int, default=320, help='Number of training examples to use.')
    parser.add_argument(
        '--n_val', type=int, default=160, help='Number of validation examples to use.')
    parser.add_argument(
        '--num_epochs', type=int, default=1, help='Number of train epochs.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
| 285 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
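# Both the modern README.md YAML front matter and the legacy dataset_infos.json layout
# must parse into an equivalent DatasetInfosDict.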
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
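    # Whichever source was written (YAML header or legacy JSON), the parsed size must match below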
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload( tmp_path , dataset_info ):
    '''simple docstring'''
    tmp_dir = str(tmp_path )
    dataset_info.write_to_directory(tmp_dir )
    reloaded = DatasetInfo.from_directory(tmp_dir )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_dir , 'dataset_info.json' ) )
def test_dataset_info_to_yaml_dict( ):
    '''simple docstring'''
    dataset_info = DatasetInfo(
        description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty( ):
    '''simple docstring'''
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload( tmp_path , dataset_infos_dict ):
    '''simple docstring'''
    tmp_dir = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_dir )
    reloaded = DatasetInfosDict.from_directory(tmp_dir )
    # the config_name of the dataset_infos_dict takes precedence over the DatasetInfo attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_dir , 'README.md' ) )
| 285 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
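# Typical transformers lazy-init module: public names are registered in
# `_import_structure` and only imported on first attribute access via `_LazyModule`.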
_import_structure = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neo'''] = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_gpt_neo'''] = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_whisper_fast'''] = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_whisper'''] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_whisper'''] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_whisper'''] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 | 1 |
from math import ceil, sqrt
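# Appears to implement Project Euler 173: count square "laminae" (hollow square
# frames) that can be built with up to `limit` tiles.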
def solution(limit : int = 100_0000 ) -> int:
    answer : int = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound : int = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound : int = 1
        # the hole width must have the same parity as the outer width
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(F'''{solution() = }''')
| 326 |
import math
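# Introsort: quicksort with a median-of-3 pivot that falls back to heapsort past a
# recursion-depth limit and to insertion sort for slices below a size threshold.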
def insertion_sort(array : list , start : int = 0 , end : int = 0 ) -> list:
    end = end or len(array )
    for i in range(start , end ):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array : list , index : int , heap_size : int ) -> None: # Max Heap
    largest = index
    left_index = 2 * index + 1 # Left Node
    right_index = 2 * index + 2 # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array , largest , heap_size )
def heap_sort(array : list ) -> list:
    n = len(array )
    for i in range(n // 2 , -1 , -1 ):
        heapify(array , i , n )
    for i in range(n - 1 , 0 , -1 ):
        array[0], array[i] = array[i], array[0]
        heapify(array , 0 , i )
    return array
def median_of_3(array : list , first_index : int , middle_index : int , last_index : int ) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array : list , low : int , high : int , pivot : int ) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array : list ) -> list:
    if len(array ) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array ) ) )
    size_threshold = 16
    return intro_sort(array , 0 , len(array ) , size_threshold , max_depth )
def intro_sort(array : list , start : int , end : int , size_threshold : int , max_depth : int ) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array )
        max_depth -= 1
        pivot = median_of_3(array , start , start + ((end - start) // 2) + 1 , end - 1 )
        p = partition(array , start , end , pivot )
        intro_sort(array , p , end , size_threshold , max_depth )
        end = p
    return insertion_sort(array , start , end )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input('''Enter numbers separated by a comma : ''').strip()
    unsorted = [float(item) for item in user_input.split(''',''')]
    print(sort(unsorted))
| 326 | 1 |
import math
from collections.abc import Iterator
from itertools import takewhile
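# Project Euler 10 (generalized): sum of all primes below n, via 6k +/- 1 trial division.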
def is_prime(number : int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 ,int(math.sqrt(number) + 1) ,6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator() -> Iterator[int]:
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(n : int = 2000000) -> int:
    '''simple docstring'''
    return sum(takewhile(lambda x: x < n ,prime_generator()))
if __name__ == "__main__":
print(f"""{solution() = }""")
| 94 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_small_fast'] = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot_small'] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot_small'] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot_small'] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 94 | 1 |
from collections.abc import Sequence
from queue import Queue
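# Segment tree supporting point updates and range queries over any associative
# function (sum/max/min); each node stores a [start, end] range and its aggregate.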
class SegmentTreeNode:
    '''simple docstring'''
    def __init__( self , start , end , val , left=None , right=None) -> None:
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
    def __repr__( self) -> str:
        return F"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class SegmentTree:
    '''simple docstring'''
    def __init__( self , collection: Sequence , function) -> None:
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0 , len(collection) - 1)
    def update( self , i , val) -> None:
        self._update_tree(self.root , i , val)
    def query_range( self , i , j):
        return self._query_range(self.root , i , j)
    def _build_tree( self , start , end):
        if start == end:
            return SegmentTreeNode(start , end , self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start , mid)
        right = self._build_tree(mid + 1 , end)
        return SegmentTreeNode(start , end , self.fn(left.val , right.val) , left , right)
    def _update_tree( self , node , i , val) -> None:
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left , i , val)
        else:
            self._update_tree(node.right , i , val)
        node.val = self.fn(node.left.val , node.right.val)
    def _query_range( self , node , i , j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left , i , j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left , i , node.mid) , self._query_range(node.right , node.mid + 1 , j) , )
        else:
            # range in right child tree
            return self._query_range(node.right , i , j)
    def traverse( self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
    arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 11 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils
sys.modules['vocabulary'] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch( tf_checkpoint_path , transfo_xl_config_file , pytorch_dump_folder_path , transfo_xl_dataset_file ):
    '''simple docstring'''
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file , """rb""" ) as fp:
            corpus = pickle.load(fp , encoding="""latin1""" )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}" )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict , pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("""vocab""" , None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + """/""" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}" )
        torch.save(corpus_dict_no_vocab , pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}." )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(f"Building PyTorch model from configuration: {config}" )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model , config , tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}" )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path )}" )
        with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
            '''An optional config json file corresponding to the pre-trained Transformer-XL model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 244 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_opt'''] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_opt'''] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_opt'''] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 350 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
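# Textbook RSA key generation (educational, not hardened for production use):
# n = p*q, e chosen coprime to (p-1)*(q-1), d = e^-1 mod (p-1)*(q-1).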
def main( ):
    print('''Making key files...''' )
    make_key_files('''rsa''' , 1_0_2_4 )
    print('''Key files generation successful.''' )
def generate_key( key_size : int ):
    print('''Generating prime p...''' )
    p = rabinMiller.generate_large_prime(key_size )
    print('''Generating prime q...''' )
    q = rabinMiller.generate_large_prime(key_size )
    n = p * q
    print('''Generating e that is relatively prime to (p - 1) * (q - 1)...''' )
    while True:
        e = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
        if cryptoMath.gcd(e , (p - 1) * (q - 1) ) == 1:
            break
    print('''Calculating d that is mod inverse of e...''' )
    d = cryptoMath.find_mod_inverse(e , (p - 1) * (q - 1) )
    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files( name : str , key_size : int ):
    if os.path.exists(f"{name}_pubkey.txt" ) or os.path.exists(f"{name}_privkey.txt" ):
        print('''\nWARNING:''' )
        print(
            f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(f"\nWriting public key to file {name}_pubkey.txt..." )
    with open(f"{name}_pubkey.txt" , '''w''' ) as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}" )
    print(f"Writing private key to file {name}_privkey.txt..." )
    with open(f"{name}_privkey.txt" , '''w''' ) as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}" )
if __name__ == "__main__":
main()
| 217 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """convbert"""
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=7_68 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig( OnnxConfig ):
    '''simple docstring'''
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 319 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
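# Byte-level BPE helper: every byte is mapped to a printable unicode character so
# the tokenizer never needs an <unk> token for raw text.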
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class LongformerTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ) -> None:
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.encoder )
    def get_vocab( self ):
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(''' ''' ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        '''simple docstring'''
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
| 319 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/xglm-564M': 2048,
}
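# XGLM reuses the fairseq vocabulary layout, so SentencePiece ids are shifted by a
# fixed offset and the first four ids are pinned to <s>/<pad>/</s>/<unk> (see the
# alignment table inside __init__ below).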
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int="<s>" , SCREAMING_SNAKE_CASE__ : List[Any]="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="<unk>" , SCREAMING_SNAKE_CASE__ : List[Any]="<pad>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : int , ) -> None:
lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowerCAmelCase__ = 7
lowerCAmelCase__ = [f'<madeupword{i}>' for i in range(self.num_madeup_words )]
lowerCAmelCase__ = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase__ = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase__ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
lowerCAmelCase__ = len(self.sp_model )
lowerCAmelCase__ = {f'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Any ) -> int:
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = None
lowerCAmelCase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any ) -> Any:
lowerCAmelCase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase__ = {}
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowerCAmelCase__ = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase__ = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def a ( self : int ) -> Any:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def a ( self : str ) -> Dict:
lowerCAmelCase__ = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase__ = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Dict:
lowerCAmelCase__ = "".join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , " " ).strip()
return out_string
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , "wb" ) as fi:
lowerCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 353 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
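# Keys are fairseq parameter-name fragments; values are the matching HF Wav2Vec2
# module paths ('*' is replaced with the encoder layer index during conversion).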
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict( filename ):
    """simple docstring"""
    result = {}
    with open(filename , "r" ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def _A ( lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int ):
"""simple docstring"""
for attribute in key.split("." ):
lowerCAmelCase__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase_ ):
lowerCAmelCase__ = PARAM_MAPPING[full_name.split("." )[-1]]
lowerCAmelCase__ = "param"
if weight_type is not None and weight_type != "param":
lowerCAmelCase__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape
elif weight_type is not None and weight_type == "param":
lowerCAmelCase__ = hf_pointer
for attribute in hf_param_name.split("." ):
lowerCAmelCase__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase__ = shape_pointer.shape
# let's reduce dimension
lowerCAmelCase__ = value[0]
else:
lowerCAmelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
lowerCAmelCase__ = value
elif weight_type == "weight_g":
lowerCAmelCase__ = value
elif weight_type == "weight_v":
lowerCAmelCase__ = value
elif weight_type == "bias":
lowerCAmelCase__ = value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
lowerCAmelCase__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase__ = value
else:
lowerCAmelCase__ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _A ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase_ ):
lowerCAmelCase__ = PARAM_MAPPING[full_name.split("." )[-1]]
lowerCAmelCase__ = "param"
if weight_type is not None and weight_type != "param":
lowerCAmelCase__ = ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
lowerCAmelCase__ = ".".join([key, hf_param_name] )
else:
lowerCAmelCase__ = key
lowerCAmelCase__ = value if "lm_head" in full_key else value[0]
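# Maps fairseq adapter parameter names to the corresponding HF submodule weight names.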
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _A ( lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Any=None ):
"""simple docstring"""
lowerCAmelCase__ = False
for key, mapped_key in MAPPING.items():
lowerCAmelCase__ = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowerCAmelCase__ = True
if "*" in mapped_key:
lowerCAmelCase__ = name.split(lowerCAmelCase_ )[0].split("." )[-2]
lowerCAmelCase__ = mapped_key.replace("*" , lowerCAmelCase_ )
if "weight_g" in name:
lowerCAmelCase__ = "weight_g"
elif "weight_v" in name:
lowerCAmelCase__ = "weight_v"
elif "bias" in name:
lowerCAmelCase__ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase__ = "weight"
else:
lowerCAmelCase__ = None
if hf_dict is not None:
rename_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return is_used
return is_used
def _A ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = fairseq_model.state_dict()
lowerCAmelCase__ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == "group" , )
lowerCAmelCase__ = True
else:
lowerCAmelCase__ = load_wavaveca_layer(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if not is_used:
unused_weights.append(lowerCAmelCase_ )
logger.warning(F'Unused weights: {unused_weights}' )
def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = full_name.split("conv_layers." )[-1]
lowerCAmelCase__ = name.split("." )
lowerCAmelCase__ = int(items[0] )
lowerCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
lowerCAmelCase__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
lowerCAmelCase__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
lowerCAmelCase__ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
lowerCAmelCase__ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCAmelCase_ )
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ):
    """Copy/paste/tweak a fairseq wav2vec2 checkpoint into the transformers design."""
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path )
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path )
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config )
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaForCTC(config )
    else:
        hf_wavavec = WavaVecaForPreTraining(config )
    if is_finetuned or is_seq_class:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining" )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
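# A hypothetical invocation of this converter (the script name and paths are
# placeholders, not from the original):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-converted
#
# Without --not_finetuned the checkpoint is treated as fine-tuned (CTC head);
# with it, a pretraining checkpoint is converted via WavaVecaForPreTraining.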
| 221 | 0 |
from collections.abc import Callable
class Heap:
    '''A generic heap: the key function flips it between max- and min-heap order.
    An index map (pos_map) lets items be updated or deleted in O(log n).'''

    def __init__( self , key = None ):
        # Stores actual heap items as [item, score] pairs.
        self.arr = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent( self , i ):
        # Returns parent index of given index if it exists, else None.
        return int((i - 1) / 2 ) if i > 0 else None

    def _left( self , i ):
        # Returns left-child index of given index if it exists, else None.
        left = int(2 * i + 1 )
        return left if 0 < left < self.size else None

    def _right( self , i ):
        # Returns right-child index of given index if it exists, else None.
        right = int(2 * i + 2 )
        return right if 0 < right < self.size else None

    def _swap( self , i , j ):
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp( self , i , j ):
        # Compares two slots by score; "<" gives max-heap order by default.
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent( self , i ):
        # Returns the index that should be the parent among i and its children.
        left = self._left(i )
        right = self._right(i )
        valid_parent = i
        if left is not None and not self._cmp(left , valid_parent ):
            valid_parent = left
        if right is not None and not self._cmp(right , valid_parent ):
            valid_parent = right
        return valid_parent

    def _heapify_up( self , index ):
        # Fixes the heap invariant upward from index.
        parent = self._parent(index )
        while parent is not None and not self._cmp(index , parent ):
            self._swap(index , parent )
            index, parent = parent, self._parent(parent )

    def _heapify_down( self , index ):
        # Fixes the heap invariant downward from index.
        valid_parent = self._get_valid_parent(index )
        while valid_parent != index:
            self._swap(index , valid_parent )
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent )

    def update_item( self , item , item_value ):
        # Updates the value of an existing item; no-op if the item is absent.
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value )]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index )
        self._heapify_down(index )

    def delete_item( self , item ):
        # Deletes the given item from the heap; no-op if the item is absent.
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last slot into the vacated position, then restore the invariant.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index )
            self._heapify_down(index )

    def insert_item( self , item , item_value ):
        # Inserts the given item with the given value into the heap.
        arr_len = len(self.arr )
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value )] )
        else:
            self.arr[self.size] = [item, self.key(item_value )]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1 )

    def get_top( self ):
        # Returns the top [item, score] pair without removing it.
        return self.arr[0] if self.size else None

    def extract_top( self ):
        # Returns and removes the top [item, score] pair.
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0] )
        return top_item_tuple
def test_heap() -> None:
    '''Placeholder for the doctest suite exercised by testmod() below.'''
if __name__ == "__main__":
import doctest
doctest.testmod()
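# Usage sketch for the heap above (a hypothetical session, matching the names
# reconstructed in the class): the key hook flips it between max- and min-heap.
#
#   h = Heap()                   # max-heap: largest score on top
#   h.insert_item(5, 34); h.insert_item(6, 31); h.insert_item(7, 37)
#   h.get_top()                  # [7, 37]
#   h.extract_top()              # [7, 37]; afterwards get_top() -> [5, 34]
#
#   g = Heap(key=lambda x: -x)   # negated scores behave as a min-heap
#   g.insert_item(1, 31); g.insert_item(2, 26)
#   g.get_top()                  # [2, -26] -> item 2 holds the smallest raw value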
| 209 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
    scheduler_classes = (DDPMScheduler,)
    def __lowercase ( self , **_a ) -> Any:
        config = {
            '''num_train_timesteps''': 1_0_0_0,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**_a )
        return config
    def __lowercase ( self ) -> Any:
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def __lowercase ( self ) -> List[Any]:
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def __lowercase ( self ) -> List[str]:
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )

    def __lowercase ( self ) -> Optional[Any]:
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )

    def __lowercase ( self ) -> str:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def __lowercase ( self ) -> Dict:
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )

    def __lowercase ( self ) -> Optional[Any]:
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def __lowercase ( self ) -> int:
        for t in [0, 5_0_0, 9_9_9]:
            self.check_over_forward(time_step=t )
    def __lowercase ( self ) -> int:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_0979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
    def __lowercase ( self ) -> Tuple:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.9606 ) < 1e-2
        assert abs(result_mean.item() - 0.3372 ) < 1e-3
    def __lowercase ( self ) -> Optional[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 202.0296 ) < 1e-2
        assert abs(result_mean.item() - 0.2631 ) < 1e-3
    def __lowercase ( self ) -> Dict:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )
    def __lowercase ( self ) -> Optional[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 5_1, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=timesteps )
    def __lowercase ( self ) -> str:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
    def __lowercase ( self ) -> Optional[int]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
            scheduler.set_timesteps(timesteps=timesteps )
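# A standalone sketch of the "fixed_small" variance probed in the assertions
# above. It mirrors the linear beta schedule from get_scheduler_config and the
# beta-tilde formula of the DDPM paper; it is an illustration, not the
# diffusers implementation itself.
def fixed_small_variance_sketch(t , num_train_timesteps=1_0_0_0 , beta_start=0.0001 , beta_end=0.02 ):
    import numpy as np

    betas = np.linspace(beta_start , beta_end , num_train_timesteps )
    alphas_cumprod = np.cumprod(1.0 - betas )
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
    # beta_tilde_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t
    return (1 - alpha_prod_t_prev ) / (1 - alpha_prod_t ) * betas[t]
# fixed_small_variance_sketch(0) ~ 0.0, (487) ~ 0.00979, (999) ~ 0.02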
| 235 | 0 |
def a__ ( ):
return [
a * b * (1_0_0_0 - a - b)
for a in range(1, 9_9_9 )
        for b in range(a, 9_9_9 )
if (a * a + b * b == (1_0_0_0 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 162 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy( out, labels ):
    outputs = np.argmax(out, axis=1 )
    return np.sum(outputs == labels )
def load_rocstories_dataset( dataset_path ):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding='utf_8' ) as f:
        f = csv.reader(f )
        output = []
        next(f ) # skip the first line
        for line in tqdm(f ):
            output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets( encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token ):
    """Turns (story, cont1, cont2, label) tuples into padded double-heads tensors."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-1_0_0, dtype=np.int64 )
        mc_labels = np.zeros((n_batch,), dtype=np.int64 )
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1 )] = with_cont1
            input_ids[i, 1, : len(with_cont2 )] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, : len(with_cont1 )] = with_cont1
            lm_labels[i, 1, : len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
parser.add_argument('--model_name', type=A__, default='openai-gpt', help='pretrained model name' )
parser.add_argument('--do_train', action='store_true', help='Whether to run training.' )
parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir', default=A__, type=A__, required=A__, help='The output directory where the model predictions and checkpoints will be written.', )
parser.add_argument('--train_dataset', type=A__, default='' )
parser.add_argument('--eval_dataset', type=A__, default='' )
parser.add_argument('--seed', type=A__, default=4_2 )
parser.add_argument('--num_train_epochs', type=A__, default=3 )
parser.add_argument('--train_batch_size', type=A__, default=8 )
parser.add_argument('--eval_batch_size', type=A__, default=1_6 )
parser.add_argument('--adam_epsilon', default=1E-8, type=A__, help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm', type=A__, default=1 )
parser.add_argument(
'--max_steps', default=-1, type=A__, help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
), )
parser.add_argument(
'--gradient_accumulation_steps', type=A__, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.', )
parser.add_argument('--learning_rate', type=A__, default=6.25E-5 )
parser.add_argument('--warmup_steps', default=0, type=A__, help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule', type=A__, default='warmup_linear' )
parser.add_argument('--weight_decay', type=A__, default=0.01 )
parser.add_argument('--lm_coef', type=A__, default=0.9 )
parser.add_argument('--n_valid', type=A__, default=3_7_4 )
parser.add_argument('--server_ip', type=A__, default='', help='Can be used for distant debugging.' )
parser.add_argument('--server_port', type=A__, default='', help='Can be used for distant debugging.' )
    args = parser.parse_args()
    print(args )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
SCREAMING_SNAKE_CASE_ : str = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(A__, A__ ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
SCREAMING_SNAKE_CASE_ : List[Any] = ['_start_', '_delimiter_', '_classify_']
SCREAMING_SNAKE_CASE_ : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(A__ )
SCREAMING_SNAKE_CASE_ : int = tokenizer.convert_tokens_to_ids(A__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(A__ ) )
model.to(A__ )
# Load and encode the datasets
def tokenize_and_encode(A__ ):
if isinstance(A__, A__ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(A__ ) )
elif isinstance(A__, A__ ):
return obj
return [tokenize_and_encode(A__ ) for o in obj]
logger.info('Encoding dataset...' )
SCREAMING_SNAKE_CASE_ : int = load_rocstories_dataset(args.train_dataset )
SCREAMING_SNAKE_CASE_ : int = load_rocstories_dataset(args.eval_dataset )
SCREAMING_SNAKE_CASE_ : Optional[Any] = (train_dataset, eval_dataset)
SCREAMING_SNAKE_CASE_ : List[str] = tokenize_and_encode(A__ )
# Compute the max input length for the Transformer
SCREAMING_SNAKE_CASE_ : Tuple = model.config.n_positions // 2 - 2
SCREAMING_SNAKE_CASE_ : Optional[int] = max(
len(story[:max_length] ) + max(len(conta[:max_length] ), len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
SCREAMING_SNAKE_CASE_ : str = min(A__, model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
SCREAMING_SNAKE_CASE_ : Tuple = pre_process_datasets(A__, A__, A__, *A__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = tensor_datasets[0], tensor_datasets[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TensorDataset(*A__ )
SCREAMING_SNAKE_CASE_ : str = RandomSampler(A__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DataLoader(A__, sampler=A__, batch_size=args.train_batch_size )
SCREAMING_SNAKE_CASE_ : List[Any] = TensorDataset(*A__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = SequentialSampler(A__ )
SCREAMING_SNAKE_CASE_ : str = DataLoader(A__, sampler=A__, batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
SCREAMING_SNAKE_CASE_ : int = args.max_steps
SCREAMING_SNAKE_CASE_ : Any = args.max_steps // (len(A__ ) // args.gradient_accumulation_steps) + 1
else:
SCREAMING_SNAKE_CASE_ : List[Any] = len(A__ ) // args.gradient_accumulation_steps * args.num_train_epochs
SCREAMING_SNAKE_CASE_ : Optional[Any] = list(model.named_parameters() )
SCREAMING_SNAKE_CASE_ : Optional[int] = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
SCREAMING_SNAKE_CASE_ : Optional[Any] = AdamW(A__, lr=args.learning_rate, eps=args.adam_epsilon )
SCREAMING_SNAKE_CASE_ : List[Any] = get_linear_schedule_with_warmup(
A__, num_warmup_steps=args.warmup_steps, num_training_steps=A__ )
if args.do_train:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ), desc='Epoch' ):
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : str = 0
SCREAMING_SNAKE_CASE_ : List[Any] = tqdm(A__, desc='Training' )
for step, batch in enumerate(A__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = tuple(t.to(A__ ) for t in batch )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = batch
SCREAMING_SNAKE_CASE_ : Tuple = model(A__, mc_token_ids=A__, lm_labels=A__, mc_labels=A__ )
SCREAMING_SNAKE_CASE_ : str = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
SCREAMING_SNAKE_CASE_ : Tuple = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
SCREAMING_SNAKE_CASE_ : List[str] = 'Training loss: {:.2e} lr: {:.2e}'.format(A__, scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
SCREAMING_SNAKE_CASE_ : List[str] = model.module if hasattr(A__, 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(args.output_dir, A__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(args.output_dir, A__ )
torch.save(model_to_save.state_dict(), A__ )
model_to_save.config.to_json_file(A__ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
SCREAMING_SNAKE_CASE_ : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
SCREAMING_SNAKE_CASE_ : int = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(A__ )
if args.do_eval:
model.eval()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = 0, 0
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = 0, 0
for batch in tqdm(A__, desc='Evaluating' ):
SCREAMING_SNAKE_CASE_ : int = tuple(t.to(A__ ) for t in batch )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = batch
with torch.no_grad():
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = model(
A__, mc_token_ids=A__, lm_labels=A__, mc_labels=A__ )
SCREAMING_SNAKE_CASE_ : List[Any] = mc_logits.detach().cpu().numpy()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = mc_labels.to('cpu' ).numpy()
SCREAMING_SNAKE_CASE_ : Dict = accuracy(A__, A__ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
SCREAMING_SNAKE_CASE_ : List[str] = eval_loss / nb_eval_steps
SCREAMING_SNAKE_CASE_ : List[Any] = eval_accuracy / nb_eval_examples
SCREAMING_SNAKE_CASE_ : List[Any] = tr_loss / nb_tr_steps if args.do_train else None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
SCREAMING_SNAKE_CASE_ : int = os.path.join(args.output_dir, 'eval_results.txt' )
with open(A__, 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s', A__, str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
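# A hypothetical launch command for this fine-tuning script (dataset paths are
# placeholders; the flags correspond to the argparse definitions above):
#
#   python run_openai_gpt.py \
#       --model_name openai-gpt \
#       --do_train --do_eval \
#       --train_dataset $ROC_STORIES_DIR/cloze_test_val__spring2016.csv \
#       --eval_dataset $ROC_STORIES_DIR/cloze_test_test__spring2016.csv \
#       --output_dir ../log \
#       --train_batch_size 16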
| 162 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE : List[str] = MBartTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = MBartTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE : List[str] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Any = tokenizer_r.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE : Any = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE : str = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Any = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    checkpoint_name = '''facebook/mbart-large-en-ro'''
    src_text = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
        ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
    ]
    tgt_text = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
        '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
        ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
        ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
    ]
    expected_src_tokens = [8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2, EN_CODE]
@classmethod
    def setUpClass( cls : Tuple ):
        cls.tokenizer : MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
        cls.pad_token_id = 1
        return cls
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 25_00_20 )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE : List[Any] = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = 10
SCREAMING_SNAKE_CASE : int = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCamelCase_ )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_00_26, 25_00_01] )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = MBartTokenizer.from_pretrained(lowerCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ )
@require_torch
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE : Optional[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10 , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE : str = targets["""input_ids"""]
SCREAMING_SNAKE_CASE : Optional[Any] = shift_tokens_right(lowerCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 30_34, 2, 25_00_04]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_00_01,
} , )
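# A minimal sketch of the translation-input pattern the last test exercises
# (the checkpoint name is the one used above; the rest mirrors the test, not
# new API):
#
#   tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok("A test", return_tensors="pt")
#   # input_ids end with [eos, en_XX]; generation is then forced to start with
#   # the target language code (ar_AR/ro_RO) via forced_bos_token_id.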
| 323 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''efficientnet'''
    def __init__( self : Tuple , num_channels : int = 3 , image_size : int = 6_00 , width_coefficient : float = 2.0 , depth_coefficient : float = 3.1 , depth_divisor : int = 8 , kernel_sizes : List[int] = [3, 3, 5, 3, 5, 5, 3] , in_channels : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , out_channels : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , depthwise_padding : List[int] = [] , strides : List[int] = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats : List[int] = [1, 2, 2, 3, 3, 4, 1] , expand_ratios : List[int] = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio : float = 0.25 , hidden_act : str = "swish" , hidden_dim : int = 25_60 , pooling_type : str = "mean" , initializer_range : float = 0.02 , batch_norm_eps : float = 0.001 , batch_norm_momentum : float = 0.99 , dropout_rate : float = 0.5 , drop_connect_rate : float = 0.2 , **kwargs : int , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return 1e-5
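# Usage sketch (the upstream class names, EfficientNetConfig and its ONNX
# counterpart, are assumed here since this dump renames them):
#
#   cfg = EfficientNetConfig(width_coefficient=2.0, depth_coefficient=3.1, image_size=6_00)
#   cfg.num_hidden_layers   # sum(num_block_repeats) * 4, per the last __init__ line
#   # The ONNX config above expects a single "pixel_values" input of shape
#   # (batch, num_channels, height, width) and validates with atol 1e-5.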
| 323 | 1 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : List[str] = 16
_UpperCamelCase : List[str] = 32
def bamb(x :int ):
    '''Converts a number of bytes to mebibytes (2**20 bytes).'''
    return int(x / 2**2_0 )
class TorchTracemalloc :
    '''Context manager that reports GPU memory allocated inside its block.'''

    def __enter__( self : int ):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__( self : Optional[Any] , *exc : Union[str, Any] ):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin )
        self.peaked = bamb(self.peak - self.begin )
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator :Accelerator , batch_size :int = 1_6 , model_name :str = "bert-base-cased" , n_train :int = 3_2_0 , n_val :int = 1_6_0 , ):
    '''Creates train/eval DataLoaders over a GLUE MRPC slice.'''
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset(
        'glue' , 'mrpc' , split={'train': f'''train[:{n_train}]''', 'validation': f'''validation[:{n_val}]'''} )

    def tokenize_function(examples :dict ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(examples :Tuple ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function(config :Optional[int] , args :Optional[int] ):
    # Initialize the accelerator first; everything else hangs off its state.
    accelerator = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a : Tuple = config['lr']
a : Dict = int(config['num_epochs'] )
a : Any = int(config['seed'] )
a : int = int(config['batch_size'] )
a : List[str] = args.model_name_or_path
set_seed(A_ )
a : Optional[int] = get_dataloaders(A_ , A_ , A_ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(A_ , return_dict=A_ )
# Instantiate optimizer
a : Union[str, Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
a : Union[str, Any] = optimizer_cls(params=model.parameters() , lr=A_ )
if accelerator.state.deepspeed_plugin is not None:
a : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
a : Optional[int] = 1
a : Union[str, Any] = (len(A_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
a : Any = get_linear_schedule_with_warmup(
optimizer=A_ , num_warmup_steps=0 , num_training_steps=A_ , )
else:
a : Optional[Any] = DummyScheduler(A_ , total_num_steps=A_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a : Any = accelerator.prepare(
A_ , A_ , A_ , A_ , A_ )
# We need to keep track of how many total steps we have iterated over
a : Optional[Any] = 0
# We also need to keep track of the stating epoch so files are named properly
a : List[Any] = 0
# Now we train the model
a : Union[str, Any] = {}
for epoch in range(A_ , A_ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(A_ ):
a : Any = model(**A_ )
a : int = outputs.loss
a : Any = loss / gradient_accumulation_steps
accelerator.backward(A_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
a : Tuple = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f'''epoch-{epoch}'''] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(A_ , A_ )
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=A_ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=A_ , )
parser.add_argument(
'--output_dir' , type=A_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=A_ , default=A_ , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=A_ , default=3_2_0 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=A_ , default=1_6_0 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=A_ , default=1 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 4_2, 'batch_size': 1_6}
    training_function(config , args )
if __name__ == "__main__":
main()
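# A hypothetical launch with a DeepSpeed accelerate config (the yaml name is a
# placeholder; the script flags are the ones defined above):
#
#   accelerate launch --config_file ds_zero3.yaml peak_memory_usage.py \
#       --model_name_or_path bert-base-cased \
#       --n_train 320 --n_val 160 \
#       --peak_memory_upper_bound 2000 \
#       --output_dir ./logs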
| 361 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    'configuration_rag': ['RagConfig'],
    'retrieval_rag': ['RagRetriever'],
    'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rag'] = [
        'RagModel',
        'RagPreTrainedModel',
        'RagSequenceForGeneration',
        'RagTokenForGeneration',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rag'] = [
        'TFRagModel',
        'TFRagPreTrainedModel',
        'TFRagSequenceForGeneration',
        'TFRagTokenForGeneration',
    ]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
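# What the _LazyModule indirection buys: the heavy torch/TF imports only run
# when an attribute is first touched. A minimal standalone sketch of the same
# idea (an illustration, not the transformers implementation):
import importlib
import types

class LazySubmoduleProxy(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {s: m for m, syms in import_structure.items() for s in syms}

    def __getattr__(self, attr):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # Import the submodule on first access and delegate the lookup.
        module = importlib.import_module("." + module_name, self.__name__)
        return getattr(module, attr)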
| 186 | 0 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path) -> None:
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
_lowercase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowercase : Optional[Any] =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
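# Hypothetical invocation (file names are placeholders):
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5_checkpoint \
#       --config_file ./t5_config.json \
#       --pytorch_dump_path ./t5-pytorch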
| 170 |
from __future__ import annotations
import math
def is_prime( number : int) -> bool:
    """Trial division over 6k +/- 1 candidates up to sqrt(number)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number) + 1) , 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites =[num for num in range(3, 10_0001, 2) if not is_prime(num)]
def compute_nums( n : int) -> list[int]:
    """Returns the first n odd composites not expressible as a prime plus twice a square."""
    if not isinstance(n , int):
        raise ValueError("""n must be an integer""")
    if n <= 0:
        raise ValueError("""n must be >= 0""")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []
def solution() -> int:
    """Returns the smallest odd composite that disproves the conjecture."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f'{solution() = }')
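# This is Project Euler 46 ("Goldbach's other conjecture"): compute_nums collects
# odd composites that are not a prime plus twice a square, and solution() returns
# the first such number, the well-known counterexample 5777.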
| 170 | 1 |
from __future__ import annotations
def two_pointer(nums , target ):
    """Returns indices of two entries of sorted `nums` that sum to `target`, else []."""
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 355 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2_048,
}
def load_vocab_and_emoji( vocab_file, emoji_file ):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, """r""", encoding="""utf-8""" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, """r""", encoding="""utf-8""" ) as f:
        token = f.readlines()
    token = [[t.rstrip("""\n""" )] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[""",""".join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class snake_case__ ( _lowerCAmelCase ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ):
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                """ model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                """ pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def __magic_name__ ( self ) -> Union[str, Any]:
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def __magic_name__ ( self ) -> List[Any]:
return dict(self.raw_vocab , **self.added_tokens_encoder )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Any:
return self.subword_tokenizer.tokenize(lowerCAmelCase__ , clean=self.do_clean_text )
def __magic_name__ ( self , lowerCAmelCase__ ) -> int:
return self.vocab.get(lowerCAmelCase__ , self.vocab.get(self.unk_token ) )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase__ )
    def convert_tokens_to_string( self , tokens ) -> Any:
        out_string = """""".join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
__magic_name__ : Dict = 0
if os.path.isdir(lowerCAmelCase__ ):
__magic_name__ : Union[str, Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__ : Optional[Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] )
else:
__magic_name__ : Optional[Any] = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""]
)
__magic_name__ : Any = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""]
)
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
""" Please check that the vocabulary is not corrupted!""" )
__magic_name__ : Optional[Any] = token_index
writer.write(""",""".join(lowerCAmelCase__ ) + """\n""" )
index += 1
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
json.dump(self.emoji , lowerCAmelCase__ )
return vocab_file, emoji_file
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
__magic_name__ : str = vocab # same as swe
__magic_name__ : int = ids_to_tokens # same as bpe
__magic_name__ : List[str] = emoji
__magic_name__ : Optional[Any] = np.max([len(lowerCAmelCase__ ) for w in self.vocab.keys()] )
__magic_name__ : str = re.compile(R"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
__magic_name__ : str = re.compile(R"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
__magic_name__ : Any = re.compile(R"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
__magic_name__ : List[str] = re.compile(
R"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
__magic_name__ : Dict = re.compile(
R"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
__magic_name__ : List[Any] = re.compile(
R"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
__magic_name__ : str = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"""
__magic_name__ : Dict = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"""
__magic_name__ : Optional[int] = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )
def __len__( self ) -> int:
return len(self.ids_to_tokens )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[int]:
__magic_name__ : Any = self.content_repattera.sub("""<URL>""" , lowerCAmelCase__ )
__magic_name__ : Any = self.content_repattera.sub("""<EMAIL>""" , lowerCAmelCase__ )
__magic_name__ : Dict = self.content_repattera.sub("""<TEL>""" , lowerCAmelCase__ )
__magic_name__ : Tuple = self.content_repattera.sub("""<DATE>""" , lowerCAmelCase__ )
__magic_name__ : str = self.content_repattera.sub("""<DATE>""" , lowerCAmelCase__ )
__magic_name__ : Tuple = self.content_repattera.sub("""<PRICE>""" , lowerCAmelCase__ )
__magic_name__ : Dict = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
__magic_name__ : str = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
return content
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Any:
__magic_name__ : Union[str, Any] = text.replace(""" """ , """<SP>""" )
__magic_name__ : Optional[int] = text.replace(""" """ , """<SP>""" )
__magic_name__ : int = text.replace("""\r\n""" , """<BR>""" )
__magic_name__ : Optional[Any] = text.replace("""\n""" , """<BR>""" )
__magic_name__ : List[Any] = text.replace("""\r""" , """<BR>""" )
__magic_name__ : List[str] = text.replace("""\t""" , """<TAB>""" )
__magic_name__ : Optional[Any] = text.replace("""—""" , """ー""" )
__magic_name__ : int = text.replace("""−""" , """ー""" )
for k, v in self.emoji["emoji"].items():
if k in text:
__magic_name__ : List[Any] = text.replace(lowerCAmelCase__ , lowerCAmelCase__ )
if clean:
__magic_name__ : List[str] = self.clean_text(lowerCAmelCase__ )
def check_simbol(lowerCAmelCase__ ):
__magic_name__ : int = x.encode()
if len(lowerCAmelCase__ ) == 1 and len(lowerCAmelCase__ ) == 2:
__magic_name__ : str = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0Xc2_a1 and c <= 0Xc2_bf)
or (c >= 0Xc7_80 and c <= 0Xc7_83)
or (c >= 0Xca_b9 and c <= 0Xcb_bf)
or (c >= 0Xcc_80 and c <= 0Xcd_a2)
):
return True
return False
def checkuae(lowerCAmelCase__ ):
__magic_name__ : List[Any] = x.encode()
if len(lowerCAmelCase__ ) == 1 and len(lowerCAmelCase__ ) == 3:
__magic_name__ : Dict = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0Xe2_80_80 and c <= 0Xe2_b0_7f:
return True
return False
__magic_name__ : Optional[Any] = 0
__magic_name__ : List[str] = []
while pos < len(lowerCAmelCase__ ):
__magic_name__ : Optional[int] = min(len(lowerCAmelCase__ ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3
__magic_name__ : int = [] # (token_id, token, pos)
for e in range(lowerCAmelCase__ , lowerCAmelCase__ , -1 ):
__magic_name__ : str = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(lowerCAmelCase__ ) > 2:
__magic_name__ : Union[str, Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(lowerCAmelCase__ ) > 0:
# the smallest token_id is adopted
__magic_name__ ,__magic_name__ ,__magic_name__ : str = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : x[0] )[0]
result.append(lowerCAmelCase__ )
__magic_name__ : List[Any] = e
else:
__magic_name__ : Dict = pos + 1
__magic_name__ : str = text[pos:end]
if check_simbol(lowerCAmelCase__ ):
result.append("""<KIGOU>""" )
elif checkuae(lowerCAmelCase__ ):
result.append("""<U2000U2BFF>""" )
else:
for i in wd.encode("""utf-8""" ):
result.append("""<|byte%d|>""" % i )
__magic_name__ : List[str] = end
return result
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__="\n" ) -> Optional[int]:
__magic_name__ : Tuple = []
__magic_name__ : Tuple = []
        for index in lowerCAmelCase__ :
            __magic_name__ : str = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(lowerCAmelCase__ ) > 0:
words.append(bytearray(lowerCAmelCase__ ).decode("""utf-8""" , errors="""replace""" ) )
__magic_name__ : List[Any] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["""emoji_inv"""][word] )
elif word == "<SP>":
words.append(""" """ )
elif word == "<BR>":
words.append(lowerCAmelCase__ )
elif word == "<TAB>":
words.append("""\t""" )
elif word == "<BLOCK>":
words.append("""▀""" )
elif word == "<KIGOU>":
words.append("""ǀ""" )
elif word == "<U2000U2BFF>":
words.append("""‖""" )
else:
words.append(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
words.append(bytearray(lowerCAmelCase__ ).decode("""utf-8""" , errors="""replace""" ) )
__magic_name__ : Union[str, Any] = """""".join(lowerCAmelCase__ )
return text
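# Minimal, self-contained sketch of the greedy longest-match lookup the
# tokenizer above is built on. The toy vocabulary and text are illustrative
# only; the real tokenizer also collects shorter candidates and prefers the
# smallest token id, and falls back to byte tokens when nothing matches.
toy_vocab = {"こん": 0, "こんにちは": 1, "に": 2, "ち": 3, "は": 4}
toy_text = "こんにちは"
pos, result, maxlen = 0, [], max(len(w) for w in toy_vocab)
while pos < len(toy_text):
    for e in range(min(len(toy_text), pos + maxlen), pos, -1):
        if toy_text[pos:e] in toy_vocab:
            result.append(toy_vocab[toy_text[pos:e]])
            pos = e
            break
    else:
        pos += 1  # no vocab hit: skip one character
print(result)  # [1] -- the longest match beats the shorter pieces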
| 138 | 0 |
'''simple docstring'''
def lowerCamelCase__ ( _A ):
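    # Partition problem: split the array into two subsets whose sums are as close
    # as possible. dp[i][j] is True when some subset of the first i elements sums
    # to exactly j; the answer is s - 2*j for the largest feasible j <= s/2.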
a : Optional[Any] = len(_A )
a : Optional[Any] = sum(_A )
a : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
a : List[str] = True
for i in range(1 , s + 1 ):
a : Optional[int] = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
            a : Dict = dp[i - 1][j]  # carry over: sum j is reachable without element i
if arr[i - 1] <= j:
a : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
a : List[str] = s - 2 * j
break
return diff | 297 |
'''simple docstring'''
def lowerCamelCase__ ( _A , _A ):
while second != 0:
a : Union[str, Any] = first & second
first ^= second
a : Tuple = c << 1
return first
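# Worked trace of the carry loop above for first=5 (0b101), second=3 (0b011):
#   carry = 5 & 3 = 0b001 ; first = 5 ^ 3 = 0b110 ; second = carry << 1 = 0b010
#   carry = 6 & 2 = 0b010 ; first = 6 ^ 2 = 0b100 ; second = carry << 1 = 0b100
#   carry = 4 & 4 = 0b100 ; first = 4 ^ 4 = 0b000 ; second = carry << 1 = 0b1000
#   carry = 0 & 8 = 0     ; first = 0 ^ 8 = 0b1000 ; second = 0 -> loop ends with 8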
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase: Optional[int] = int(input('Enter the first number: ').strip())
lowerCAmelCase: Union[str, Any] = int(input('Enter the second number: ').strip())
print(F"{add(first, second) = }") | 297 | 1 |
def lowerCamelCase__ (__lowerCamelCase = 1, __lowerCamelCase = 1000 ):
_SCREAMING_SNAKE_CASE : int = 1
_SCREAMING_SNAKE_CASE : List[str] = 0
for divide_by_number in range(__lowerCamelCase, digit + 1 ):
_SCREAMING_SNAKE_CASE : list[int] = []
_SCREAMING_SNAKE_CASE : Dict = numerator
for _ in range(1, digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Tuple = len(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = divide_by_number
else:
has_been_divided.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = now_divide * 10 % divide_by_number
return the_digit
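# Self-contained check of the same idea with de-mangled names: the recurring
# cycle of 1/d has as many digits as there are distinct remainders before one
# repeats, and among 1/2 .. 1/10 the longest cycle belongs to 1/7 = 0.(142857).
def cycle_length(d: int) -> int:
    seen, r = [], 1 % d
    while r and r not in seen:
        seen.append(r)
        r = r * 10 % d
    return 0 if r == 0 else len(seen) - seen.index(r)
assert max(range(2, 11), key=cycle_length) == 7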
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 351 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCamelCase__ =logging.getLogger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None ) -> Optional[Any]:
super().__init__(
__lowerCamelCase , question_encoder_tokenizer=__lowerCamelCase , generator_tokenizer=__lowerCamelCase , index=__lowerCamelCase , init_retrieval=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : List[Any] = None
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any:
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
_SCREAMING_SNAKE_CASE : List[str] = self._infer_socket_ifname()
# avoid clash with the NCCL port
_SCREAMING_SNAKE_CASE : List[Any] = str(distributed_port + 1 )
_SCREAMING_SNAKE_CASE : int = dist.new_group(ranks=__lowerCamelCase , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCamelCase_ ( self ) -> Optional[Any]:
return dist.get_rank(group=self.process_group ) == 0
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=torch.floataa ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = torch.empty(__lowerCamelCase , dtype=__lowerCamelCase )
dist.scatter(__lowerCamelCase , src=0 , scatter_list=__lowerCamelCase , group=self.process_group )
return target_tensor
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : int = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_SCREAMING_SNAKE_CASE : Any = next((addr for addr in addrs if addr.startswith("e" )) , __lowerCamelCase )
return ifname
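    # Communication pattern used by the distributed retrieve below (rank 0 is
    # the main worker):
    #   1. every rank computes its question hidden states locally
    #   2. dist.gather(...)  -> rank 0 collects the hidden states of all ranks
    #   3. rank 0 queries the index once for the whole gathered batch
    #   4. dist.scatter(...) -> each rank gets back its own doc ids / embeddings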
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = self._main_retrieve(__lowerCamelCase , __lowerCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCamelCase )
# distributed training
_SCREAMING_SNAKE_CASE : Union[str, Any] = dist.get_world_size(group=self.process_group )
# gather logic
_SCREAMING_SNAKE_CASE : Any = None
if self._is_main():
_SCREAMING_SNAKE_CASE : Optional[Any] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__lowerCamelCase )]
dist.gather(torch.tensor(__lowerCamelCase ) , dst=0 , gather_list=__lowerCamelCase , group=self.process_group )
# scatter logic
_SCREAMING_SNAKE_CASE : Optional[int] = question_hidden_states.shape[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = []
_SCREAMING_SNAKE_CASE : Optional[int] = []
if self._is_main():
assert len(__lowerCamelCase ) == world_size
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self._main_retrieve(torch.cat(__lowerCamelCase ).numpy() , __lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = torch.tensor(__lowerCamelCase ), torch.tensor(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self._scattered(__lowerCamelCase , [n_queries, n_docs] , target_type=torch.intaa )
_SCREAMING_SNAKE_CASE : Optional[Any] = self._scattered(__lowerCamelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowerCamelCase ) | 325 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def __magic_name__ ( A : List[Any], A : List[str], A : Optional[Any] ):
'''simple docstring'''
if isinstance(A, torch.Tensor ):
return image
elif isinstance(A, PIL.Image.Image ):
a = [image]
if isinstance(image[0], PIL.Image.Image ):
a = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
a = np.concatenate(A, axis=0 )
a = np.array(A ).astype(np.floataa ) / 2_55.0
a = image.transpose(0, 3, 1, 2 )
a = 2.0 * image - 1.0
a = torch.from_numpy(A )
elif isinstance(image[0], torch.Tensor ):
a = torch.cat(A, dim=0 )
return image
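# Range check for the helper above: an RGB PIL image is resized to (w, h) and
# becomes a (1, 3, h, w) float tensor, with pixel 0 -> -1.0 and pixel 255 -> 1.0
# via 2 * (x / 255) - 1.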
def __magic_name__ ( A : str, A : Optional[int], A : Tuple, A : Tuple=0.99_95 ):
'''simple docstring'''
if not isinstance(A, np.ndarray ):
a = True
a = va.device
a = va.cpu().numpy()
a = va.cpu().numpy()
a = np.sum(va * va / (np.linalg.norm(A ) * np.linalg.norm(A )) )
if np.abs(A ) > DOT_THRESHOLD:
a = (1 - t) * va + t * va
else:
a = np.arccos(A )
a = np.sin(A )
a = theta_a * t
a = np.sin(A )
a = np.sin(theta_a - theta_t ) / sin_theta_a
a = sin_theta_t / sin_theta_a
a = sa * va + sa * va
if inputs_are_torch:
a = torch.from_numpy(A ).to(A )
return va
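# Numeric sanity check for the slerp above (kept as comments because the
# mangled module-level names collide): for t = 0.5 between the orthogonal unit
# vectors [1, 0] and [0, 1],
#   theta = arccos(0) = pi/2, sin(theta) = 1
#   both blend weights are sin(pi/4) ≈ 0.7071
#   result ≈ [0.7071, 0.7071] -- the midpoint of the quarter great circle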
def __magic_name__ ( A : int, A : Optional[int] ):
'''simple docstring'''
a = F.normalize(A, dim=-1 )
a = F.normalize(A, dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def __magic_name__ ( A : List[str], A : str ):
'''simple docstring'''
for param in model.parameters():
a = value
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : AutoencoderKL , __lowerCamelCase : CLIPTextModel , __lowerCamelCase : CLIPModel , __lowerCamelCase : CLIPTokenizer , __lowerCamelCase : UNetaDConditionModel , __lowerCamelCase : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __lowerCamelCase : CLIPFeatureExtractor , __lowerCamelCase : List[Any]=None , __lowerCamelCase : str=None , __lowerCamelCase : Tuple=None , ) -> Optional[int]:
super().__init__()
self.register_modules(
vae=__lowerCamelCase , text_encoder=__lowerCamelCase , clip_model=__lowerCamelCase , tokenizer=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase , feature_extractor=__lowerCamelCase , coca_model=__lowerCamelCase , coca_tokenizer=__lowerCamelCase , coca_transform=__lowerCamelCase , )
a = (
feature_extractor.size
if isinstance(feature_extractor.size , __lowerCamelCase )
else feature_extractor.size["shortest_edge"]
)
a = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , __lowerCamelCase )
set_requires_grad(self.clip_model , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[Union[str, int]] = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
self.enable_attention_slicing(__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
set_requires_grad(self.vae , __lowerCamelCase )
def __UpperCAmelCase ( self : int ) -> List[Any]:
set_requires_grad(self.vae , __lowerCamelCase )
def __UpperCAmelCase ( self : int ) -> List[Any]:
set_requires_grad(self.unet , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> str:
set_requires_grad(self.unet , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : List[str] ) -> Optional[int]:
# get the original timestep using init_timestep
a = min(int(num_inference_steps * strength ) , __lowerCamelCase )
a = max(num_inference_steps - init_timestep , 0 )
a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str=None ) -> Dict:
if not isinstance(__lowerCamelCase , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(__lowerCamelCase )}""" )
a = image.to(device=__lowerCamelCase , dtype=__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
a = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__lowerCamelCase )
]
a = torch.cat(__lowerCamelCase , dim=0 )
else:
a = self.vae.encode(__lowerCamelCase ).latent_dist.sample(__lowerCamelCase )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
a = 0.18_215 * init_latents
a = init_latents.repeat_interleave(__lowerCamelCase , dim=0 )
a = randn_tensor(init_latents.shape , generator=__lowerCamelCase , device=__lowerCamelCase , dtype=__lowerCamelCase )
# get latents
a = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
a = init_latents
return latents
def __UpperCAmelCase ( self : str , __lowerCamelCase : Optional[int] ) -> int:
a = self.coca_transform(__lowerCamelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
a = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
a = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] ) -> Any:
a = self.feature_extractor.preprocess(__lowerCamelCase )
a = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
a = self.clip_model.get_image_features(__lowerCamelCase )
a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__lowerCamelCase )
a = image_embeddings_clip.repeat_interleave(__lowerCamelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , ) -> List[str]:
a = latents.detach().requires_grad_()
a = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase )
# predict the noise residual
a = self.unet(__lowerCamelCase , __lowerCamelCase , encoder_hidden_states=__lowerCamelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
a = self.scheduler.alphas_cumprod[timestep]
a = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
a = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
a = torch.sqrt(__lowerCamelCase )
a = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __lowerCamelCase ):
a = self.scheduler.sigmas[index]
a = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
a = 1 / 0.18_215 * sample
a = self.vae.decode(__lowerCamelCase ).sample
a = (image / 2 + 0.5).clamp(0 , 1 )
a = transforms.Resize(self.feature_extractor_size )(__lowerCamelCase )
a = self.normalize(__lowerCamelCase ).to(latents.dtype )
a = self.clip_model.get_image_features(__lowerCamelCase )
a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__lowerCamelCase )
a = spherical_dist_loss(__lowerCamelCase , __lowerCamelCase ).mean() * clip_guidance_scale
a = -torch.autograd.grad(__lowerCamelCase , __lowerCamelCase )[0]
if isinstance(self.scheduler , __lowerCamelCase ):
a = latents.detach() + grads * (sigma**2)
a = noise_pred_original
else:
a = noise_pred_original - torch.sqrt(__lowerCamelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : Optional[int] , __lowerCamelCase : Union[torch.FloatTensor, PIL.Image.Image] , __lowerCamelCase : Union[torch.FloatTensor, PIL.Image.Image] , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[int] = 5_12 , __lowerCamelCase : Optional[int] = 5_12 , __lowerCamelCase : float = 0.6 , __lowerCamelCase : Optional[int] = 50 , __lowerCamelCase : Optional[float] = 7.5 , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[float] = 1_00 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : float = 0.8 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 0.1 , ) -> Dict:
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(__lowerCamelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(__lowerCamelCase , torch.Generator ) and batch_size > 1:
a = [generator] + [None] * (batch_size - 1)
a = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
a = [x[0] for x in coca_is_none if x[1]]
a = ", ".join(__lowerCamelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__lowerCamelCase ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
a = self.get_image_description(__lowerCamelCase )
if style_prompt is None:
if len(__lowerCamelCase ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
a = self.get_image_description(__lowerCamelCase )
# get prompt text embeddings for content and style
a = self.tokenizer(
__lowerCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=__lowerCamelCase , return_tensors="pt" , )
a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
a = self.tokenizer(
__lowerCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=__lowerCamelCase , return_tensors="pt" , )
a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
a = slerp(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# duplicate text embeddings for each generation per prompt
a = text_embeddings.repeat_interleave(__lowerCamelCase , dim=0 )
# set timesteps
a = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
a = {}
if accepts_offset:
a = 1
self.scheduler.set_timesteps(__lowerCamelCase , **__lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
a , a = self.get_timesteps(__lowerCamelCase , __lowerCamelCase , self.device )
a = timesteps[:1].repeat(__lowerCamelCase )
# Preprocess image
a = preprocess(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
a = self.prepare_latents(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , text_embeddings.dtype , self.device , __lowerCamelCase )
a = preprocess(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
a = self.prepare_latents(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , text_embeddings.dtype , self.device , __lowerCamelCase )
a = slerp(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if clip_guidance_scale > 0:
a = self.get_clip_image_embeddings(__lowerCamelCase , __lowerCamelCase )
a = self.get_clip_image_embeddings(__lowerCamelCase , __lowerCamelCase )
a = slerp(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
a = content_text_input.input_ids.shape[-1]
a = self.tokenizer([""] , padding="max_length" , max_length=__lowerCamelCase , return_tensors="pt" )
a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
a = uncond_embeddings.repeat_interleave(__lowerCamelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
a = torch.randn(__lowerCamelCase , generator=__lowerCamelCase , device="cpu" , dtype=__lowerCamelCase ).to(
self.device )
else:
a = torch.randn(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
a = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
a = {}
if accepts_eta:
a = eta
# check if the scheduler accepts generator
a = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
a = generator
with self.progress_bar(total=__lowerCamelCase ):
for i, t in enumerate(__lowerCamelCase ):
# expand the latents if we are doing classifier free guidance
a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase )
# predict the noise residual
a = self.unet(__lowerCamelCase , __lowerCamelCase , encoder_hidden_states=__lowerCamelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
a , a = noise_pred.chunk(2 )
a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
a , a = self.cond_fn(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
# compute the previous noisy sample x_t -> x_t-1
a = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
a = 1 / 0.18_215 * latents
a = self.vae.decode(__lowerCamelCase ).sample
a = (image / 2 + 0.5).clamp(0 , 1 )
a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
a = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__lowerCamelCase , nsfw_content_detected=__lowerCamelCase )
| 107 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {'vocab_file': 'spiece.model'}
__lowerCAmelCase : Optional[int] = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
__lowerCAmelCase : Dict = {'bert_for_seq_generation': 512}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : List[int] = []
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str="<s>" , __lowerCamelCase : Optional[int]="</s>" , __lowerCamelCase : int="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<::::>" , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : List[str] , ) -> None:
a = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , sep_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Dict ) -> Dict:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ) -> Optional[Any]:
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : Optional[Any] , __lowerCamelCase : Dict ) -> Optional[Any]:
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def __UpperCAmelCase ( self : str , __lowerCamelCase : Union[str, Any] ) -> int:
return self.sp_model.piece_to_id(__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[str] ) -> Any:
a = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Dict ) -> Any:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
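# Hedged usage sketch: the class above is mangled to `snake_case__`, but it
# matches transformers' BertGenerationTokenizer, and the checkpoint name comes
# from PRETRAINED_VOCAB_FILES_MAP above:
#
#   tok = BertGenerationTokenizer.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder")
#   ids = tok("Hello world").input_ids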
| 107 | 1 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
lowercase__ = logging.get_logger(__name__)
class __snake_case ( __lowerCAmelCase ):
a__ = """vision-encoder-decoder"""
a__ = True
def __init__( self , **lowercase) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowercase)
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
                f'A configuration of type {self.model_type} cannot be instantiated because '
f'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}')
a__: List[Any] = kwargs.pop('encoder')
a__: int = encoder_config.pop('model_type')
a__: List[Any] = kwargs.pop('decoder')
a__: str = decoder_config.pop('model_type')
a__: Tuple = AutoConfig.for_model(lowercase , **lowercase)
a__: Optional[Any] = AutoConfig.for_model(lowercase , **lowercase)
a__: str = True
@classmethod
def lowerCamelCase_ ( cls , lowercase , lowercase , **lowercase) -> PretrainedConfig:
'''simple docstring'''
logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
a__: int = True
a__: Tuple = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowercase)
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: List[str] = copy.deepcopy(self.__dict__)
a__: int = self.encoder.to_dict()
a__: List[Any] = self.decoder.to_dict()
a__: List[Any] = self.__class__.model_type
return output
class __snake_case ( __lowerCAmelCase ):
a__ = version.parse("""1.11""" )
@property
def lowerCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def lowerCamelCase_ ( self) -> float:
'''simple docstring'''
return 1e-4
@property
def lowerCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}})
class __snake_case ( __lowerCAmelCase ):
@property
def lowerCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
a__: Tuple = OrderedDict()
a__: List[str] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
a__: Any = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
a__: Any = {0: 'batch', 1: 'encoder_sequence'}
return common_inputs
def lowerCamelCase_ ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
import torch
a__: Union[str, Any] = OrderedDict()
a__: str = super().generate_dummy_inputs(
lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase)
a__: Optional[int] = dummy_input['input_ids'].shape
a__: List[str] = (batch, encoder_sequence, self._config.encoder_hidden_size)
a__: Tuple = dummy_input.pop('input_ids')
a__: Union[str, Any] = dummy_input.pop('attention_mask')
a__: List[Any] = torch.zeros(lowercase)
return common_inputs
class __snake_case ( __lowerCAmelCase ):
@property
def lowerCamelCase_ ( self) -> None:
'''simple docstring'''
pass
def lowerCamelCase_ ( self , lowercase) -> OnnxConfig:
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase = "default") -> OnnxConfig:
'''simple docstring'''
a__: Any = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(lowercase , lowercase)
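# Hedged usage sketch (class names above are mangled; in transformers this is
# VisionEncoderDecoderConfig):
#
#   from transformers import ViTConfig, GPT2Config, VisionEncoderDecoderConfig
#   cfg = VisionEncoderDecoderConfig.from_encoder_decoder_configs(
#       ViTConfig(), GPT2Config())
#   cfg.decoder.is_decoder  # True -- set by from_encoder_decoder_configs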
| 370 | """simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
torch.manual_seed(0)
a__: str = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
torch.manual_seed(0)
a__: List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0)
a__: Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(lowercase)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Union[str, Any] = self.dummy_uncond_unet
a__: Optional[int] = DDIMScheduler()
a__: Optional[int] = self.dummy_vq_model
a__: Union[str, Any] = LDMPipeline(unet=lowercase , vqvae=lowercase , scheduler=lowercase)
ldm.to(lowercase)
ldm.set_progress_bar_config(disable=lowercase)
a__: str = torch.manual_seed(0)
a__: Dict = ldm(generator=lowercase , num_inference_steps=2 , output_type='numpy').images
a__: Union[str, Any] = torch.manual_seed(0)
a__: int = ldm(generator=lowercase , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase)[0]
a__: Union[str, Any] = image[0, -3:, -3:, -1]
a__: int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__: int = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
a__: Optional[Any] = 1e-2 if torch_device != 'mps' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Union[str, Any] = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
ldm.to(lowercase)
ldm.set_progress_bar_config(disable=lowercase)
a__: List[str] = torch.manual_seed(0)
a__: Optional[int] = ldm(generator=lowercase , num_inference_steps=5 , output_type='numpy').images
a__: Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
a__: int = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
a__: Any = 1e-2 if torch_device != 'mps' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 203 | 0 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ : int = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class __lowerCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
snake_case_ : Optional[int] = XLMProphetNetTokenizer
snake_case_ : Tuple = False
snake_case_ : Union[str, Any] = True
def UpperCamelCase ( self : str ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase = XLMProphetNetTokenizer(snake_case__ , keep_accents=snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
_UpperCAmelCase = "[PAD]"
_UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "[PAD]" )
self.assertEqual(vocab_keys[1] , "[CLS]" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(snake_case__ ) , 1_012 )
def UpperCamelCase ( self : str ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
_UpperCAmelCase = XLMProphetNetTokenizer(snake_case__ , keep_accents=snake_case__ )
_UpperCAmelCase = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_UpperCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
def UpperCamelCase ( self : int ):
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
_UpperCAmelCase = "Hello World!"
_UpperCAmelCase = [35_389, 6_672, 49, 2]
self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) )
@slow
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
_UpperCAmelCase = {"input_ids": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 133 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load Python 2 dataset pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
lowercase_ : Union[str, Any] = data_utils.TransfoXLTokenizer
lowercase_ : int = data_utils.TransfoXLCorpus
lowercase_ : Dict = data_utils
lowercase_ : Dict = data_utils
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case_ , "rb" ) as fp:
_UpperCAmelCase = pickle.load(snake_case_ , encoding="latin1" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
_UpperCAmelCase = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" )
_UpperCAmelCase = corpus.vocab.__dict__
torch.save(snake_case_ , snake_case_ )
_UpperCAmelCase = corpus.__dict__
corpus_dict_no_vocab.pop("vocab" , snake_case_ )
_UpperCAmelCase = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(f"""Save dataset to {pytorch_dataset_dump_path}""" )
torch.save(snake_case_ , snake_case_ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
_UpperCAmelCase = os.path.abspath(snake_case_ )
_UpperCAmelCase = os.path.abspath(snake_case_ )
print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
# Initialise PyTorch model
if transfo_xl_config_file == "":
_UpperCAmelCase = TransfoXLConfig()
else:
_UpperCAmelCase = TransfoXLConfig.from_json_file(snake_case_ )
print(f"""Building PyTorch model from configuration: {config}""" )
_UpperCAmelCase = TransfoXLLMHeadModel(snake_case_ )
_UpperCAmelCase = load_tf_weights_in_transfo_xl(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
_UpperCAmelCase = os.path.join(snake_case_ , snake_case_ )
_UpperCAmelCase = os.path.join(snake_case_ , snake_case_ )
print(f"""Save PyTorch model to {os.path.abspath(snake_case_ )}""" )
torch.save(model.state_dict() , snake_case_ )
print(f"""Save configuration file to {os.path.abspath(snake_case_ )}""" )
with open(snake_case_ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
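# Example invocation (the script filename and all paths below are placeholders,
# not pinned down by this file):
#
#   python convert_transfo_xl_checkpoint.py \
#       --tf_checkpoint_path /path/to/ckpt \
#       --transfo_xl_config_file /path/to/config.json \
#       --pytorch_dump_folder_path /path/to/out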
if __name__ == "__main__":
lowercase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
lowercase_ : List[Any] = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 133 | 1 |
import math
import os
import sys
def a__ ( lowerCAmelCase ) -> str:
UpperCAmelCase__ : Dict = """"""
try:
with open(lowerCAmelCase , """rb""" ) as binary_file:
UpperCAmelCase__ : List[Any] = binary_file.read()
for dat in data:
UpperCAmelCase__ : Optional[int] = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> None:
lexicon.pop(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = last_match_id
if math.loga(lowerCAmelCase ).is_integer():
for curr_key in lexicon:
UpperCAmelCase__ : List[Any] = """0""" + lexicon[curr_key]
UpperCAmelCase__ : Tuple = bin(lowerCAmelCase )[2:]
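# The helper above grows an LZ78-style lexicon over the bit string: the matched
# prefix is replaced by "prefix+0" (keeping its old code) and "prefix+1" (bound
# to the new index), and every stored code is left-padded with "0" whenever the
# index crosses a power of two, so all codes keep a uniform width.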
def a__ ( lowerCAmelCase ) -> str:
UpperCAmelCase__ : Tuple = {"""0""": """0""", """1""": """1"""}
UpperCAmelCase__ : Optional[Any] = """""", """"""
UpperCAmelCase__ : str = len(lowerCAmelCase )
for i in range(len(lowerCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCAmelCase__ : Union[str, Any] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
index += 1
UpperCAmelCase__ : Optional[Any] = """"""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
UpperCAmelCase__ : int = lexicon[curr_string]
result += last_match_id
return result
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> str:
UpperCAmelCase__ : int = os.path.getsize(lowerCAmelCase )
UpperCAmelCase__ : int = bin(lowerCAmelCase )[2:]
UpperCAmelCase__ : Optional[int] = len(lowerCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> None:
UpperCAmelCase__ : Optional[Any] = 8
try:
with open(lowerCAmelCase , """wb""" ) as opened_file:
UpperCAmelCase__ : List[Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(lowerCAmelCase ) , lowerCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(lowerCAmelCase , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> None:
UpperCAmelCase__ : Optional[Any] = read_file_binary(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = compress_data(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = add_file_length(lowerCAmelCase , lowerCAmelCase )
write_file_binary(lowerCAmelCase , lowerCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 365 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'transfo-xl'
SCREAMING_SNAKE_CASE = ['mems']
SCREAMING_SNAKE_CASE = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__(self , _lowerCamelCase=267735 , _lowerCamelCase=[20000, 40000, 200000] , _lowerCamelCase=1024 , _lowerCamelCase=1024 , _lowerCamelCase=16 , _lowerCamelCase=64 , _lowerCamelCase=4096 , _lowerCamelCase=4 , _lowerCamelCase=False , _lowerCamelCase=18 , _lowerCamelCase=1600 , _lowerCamelCase=1000 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=0 , _lowerCamelCase=-1 , _lowerCamelCase=True , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="normal" , _lowerCamelCase=0.01 , _lowerCamelCase=0.01 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-5 , _lowerCamelCase=0 , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : Any = vocab_size
UpperCAmelCase__ : Dict = []
self.cutoffs.extend(_lowerCamelCase )
if proj_share_all_but_first:
UpperCAmelCase__ : Optional[int] = [False] + [True] * len(self.cutoffs )
else:
UpperCAmelCase__ : List[Any] = [False] + [False] * len(self.cutoffs )
UpperCAmelCase__ : Dict = d_model
UpperCAmelCase__ : Dict = d_embed
UpperCAmelCase__ : List[Any] = d_head
UpperCAmelCase__ : List[str] = d_inner
UpperCAmelCase__ : Any = div_val
UpperCAmelCase__ : str = pre_lnorm
UpperCAmelCase__ : int = n_layer
UpperCAmelCase__ : Optional[Any] = n_head
UpperCAmelCase__ : Tuple = mem_len
UpperCAmelCase__ : Dict = same_length
UpperCAmelCase__ : Union[str, Any] = attn_type
UpperCAmelCase__ : Optional[int] = clamp_len
UpperCAmelCase__ : str = sample_softmax
UpperCAmelCase__ : Any = adaptive
UpperCAmelCase__ : List[Any] = dropout
UpperCAmelCase__ : List[Any] = dropatt
UpperCAmelCase__ : Tuple = untie_r
UpperCAmelCase__ : str = init
UpperCAmelCase__ : Optional[int] = init_range
UpperCAmelCase__ : Tuple = proj_init_std
UpperCAmelCase__ : str = init_std
UpperCAmelCase__ : List[str] = layer_norm_epsilon
super().__init__(eos_token_id=_lowerCamelCase , **_lowerCamelCase )
@property
def _a (self ):
"""simple docstring"""
logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def _a (self , _lowerCamelCase ):
"""simple docstring"""
raise NotImplementedError(
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 166 | 0 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Empty init weights function to ensure compatibility of the class in the library;
        # timm initializes its own modules.
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None) | 189 |
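A minimal usage sketch for the wrapper above, added for illustration: it assumes the public `TimmBackbone` / `TimmBackboneConfig` exports of `transformers` and a local `timm` install, and "resnet18" is just an example timm model name.

import torch
from transformers import TimmBackbone, TimmBackboneConfig

# use_pretrained_backbone=False keeps the sketch offline; out_indices selects which stages to return
config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(1, 2, 3, 4))
model = TimmBackbone(config)

pixel_values = torch.randn(1, 3, 224, 224)  # dummy batch with one RGB image
outputs = model(pixel_values)
for feature_map in outputs.feature_maps:  # one tensor per requested backbone stage
    print(feature_map.shape)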
from math import factorial

"""
Dual numbers for forward-mode automatic differentiation.
https://en.wikipedia.org/wiki/Dual_number
"""


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        # Drop trailing zero coefficients.
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        # Cauchy-product of the truncated Taylor coefficients.
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2)) | 189 | 1 |
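A worked check of the forward-mode rule implemented above: f(y) = y**2 * y**4 = y**6 has second derivative 30 * y**4, so the call should return 30 * 9**4 = 196830.

assert differentiate(lambda y: y**2 * y**4, 9, 2) == 30 * 9**4  # 196830
assert differentiate(lambda x: x**2, 2, 1) == 4  # d/dx x**2 at x = 2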
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
def SCREAMING_SNAKE_CASE__ ( self ):
# Initialize image_processor
a :int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
a :Tuple = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a :Any = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a :str = image_processor(
_lowerCamelCase , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE__ ( self ):
# Initialize image_processor
a :Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
a :Tuple = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
a :List[str] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_lowerCamelCase ):
a :str = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
a :Any = '''Hello'''
a :List[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_lowerCamelCase , header_text=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a :List[Any] = image_processor(
_lowerCamelCase , return_tensors='''pt''' , max_patches=_lowerCamelCase , header_text=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE__ ( self ):
# Initialize image_processor
a :Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
a :Tuple = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a :Union[str, Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a :Any = image_processor(
_lowerCamelCase , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE__ ( self ):
# Initialize image_processor
a :str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a :Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
a :Dict = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a :Tuple = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a :Optional[int] = image_processor(
_lowerCamelCase , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
def SCREAMING_SNAKE_CASE__ ( self ):
# Initialize image_processor
a :Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a :Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
a :int = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a :Dict = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a :Dict = image_processor(
_lowerCamelCase , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 281 |
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
| 281 | 1 |
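The loop above is O(n); the same answer follows in constant time from the standard closed forms for the two sums, sketched here as an added illustration.

def solution_closed_form(n: int = 100) -> int:
    # (sum i)^2 - sum i^2 with sum i = n(n+1)/2 and sum i^2 = n(n+1)(2n+1)/6
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares

assert solution_closed_form(10) == 2640  # 55**2 - 385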
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        r"""Computes log probabilities for all of the n_token classes."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
| 307 |
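A small shape-level smoke test for the adaptive softmax above, added as an illustration; the cutoffs and sizes are arbitrary and it assumes the class as reconstructed here.

import torch

crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500], div_val=1)
hidden = torch.randn(4, 8, 32)           # [bsz, seq_len, d_proj]
labels = torch.randint(0, 1000, (4, 8))  # [bsz, seq_len]
nll = crit(hidden, labels)               # NLL per shifted target token: shape [4 * 7]
log_probs = crit.log_prob(torch.randn(5, 32))  # full distribution: shape [5, 1000]
print(nll.shape, log_probs.shape)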
def solution(n: int = 1000) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
| 307 | 1 |
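A constant-time variant via inclusion-exclusion, added for illustration: add the multiples of 3 and of 5, then subtract the multiples of 15, which were counted twice.

def solution_closed_form(n: int = 1000) -> int:
    def arithmetic_sum(k: int) -> int:
        m = (n - 1) // k  # number of positive multiples of k below n
        return k * m * (m + 1) // 2

    return arithmetic_sum(3) + arithmetic_sum(5) - arithmetic_sum(15)

assert solution_closed_form(10) == 23  # 3 + 5 + 6 + 9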
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 177 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_lowerCamelCase = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)

        height = 12
        width = 12

        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }

        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 177 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4) | 212 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class A__ ( __magic_name__ ):
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = tempfile.mkdtemp()
lowerCAmelCase__ : Dict = 8
# DPR tok
lowerCAmelCase__ : str = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCAmelCase__ : List[str] = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(a , exist_ok=a )
lowerCAmelCase__ : Union[str, Any] = os.path.join(a , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
lowerCAmelCase__ : Optional[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
lowerCAmelCase__ : str = dict(zip(a , range(len(a ) ) ) )
lowerCAmelCase__ : int = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCAmelCase__ : Dict = {'unk_token': '<unk>'}
lowerCAmelCase__ : Dict = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(a , exist_ok=a )
lowerCAmelCase__ : Optional[Any] = os.path.join(a , BART_VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase__ : Any = os.path.join(a , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(a ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , 'rag_tokenizer' )
lowerCAmelCase__ : Any = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
lowerCAmelCase__ : Any = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(a )
rag_tokenizer.save_pretrained(a )
lowerCAmelCase__ : List[str] = RagTokenizer.from_pretrained(a , config=a )
self.assertIsInstance(new_rag_tokenizer.question_encoder , a )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , a )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
lowerCAmelCase__ : Any = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
lowerCAmelCase__ : List[str] = tokenizer(a )
self.assertIsNotNone(a )
@slow
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
lowerCAmelCase__ : List[str] = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
lowerCAmelCase__ : int = tokenizer(a )
self.assertIsNotNone(a ) | 212 | 1 |
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
| 352 |
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 0 |
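For non-negative integers the returned position equals int.bit_length(), which gives a quick cross-check (added illustration):

for n in (0, 1, 2, 17, 1024):
    assert get_highest_set_bit_position(n) == n.bit_length()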
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        config["num_processes"] = 1
        config["use_cpu"] = True
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 111 |
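A hedged sketch of driving the helper above directly, without going through the CLI (the target path is purely illustrative):

config_path = write_basic_config(mixed_precision="bf16", save_location="/tmp/accelerate_default.json")
if config_path:
    print(f"wrote default accelerate config to {config_path}")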
def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73_682
| 111 | 1 |
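Because the table above is filled one denomination at a time, it counts coin combinations rather than ordered sequences — the classic unbounded-knapsack counting recurrence. A quick sanity check, added for illustration:

assert solution(5) == 4      # {5}, {2,2,1}, {2,1,1,1}, {1,1,1,1,1}
assert solution(200) == 73682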
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
| 218 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 218 | 1 |
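A small illustrative instantiation of the config above (toy values chosen for the example, not the WT103 defaults):

config = TransfoXLConfig(
    vocab_size=10000, cutoffs=[1000, 5000], d_model=256, d_embed=256, n_layer=4, n_head=4, d_head=64, d_inner=1024
)
print(config.tie_projs)                # [False, True, True]: all but the first projection are shared
print(config.max_position_embeddings)  # -1: Transformer-XL has no fixed sequence length limit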
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding direction
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 326 |
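An equivalent formulation using collections.Counter, added for illustration: multiset equality of the normalized strings is exactly the anagram condition.

from collections import Counter

def check_anagrams_counter(first: str, second: str) -> bool:
    def normalize(s: str) -> str:
        return s.lower().replace(" ", "")

    return Counter(normalize(first)) == Counter(normalize(second))

assert check_anagrams_counter("Silent", "Listen")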
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 10 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 356 | '''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 345 | 0 |
def perfect(number) -> bool:
    '''simple docstring'''
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
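# A perfect number equals the sum of its proper divisors, e.g. 28 = 1 + 2 + 4 + 7 + 14.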
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 209 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = (UnCLIPScheduler,)
def __lowerCamelCase ( self , **__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = {
'''num_train_timesteps''': 1_0_0_0,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**__lowerCAmelCase )
return config
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__lowerCAmelCase , prev_timestep=__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config(variance_type='''fixed_small_log''' )
lowerCamelCase__ = scheduler_class(**__lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.999_4987 ) ) < 1E-5
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config(variance_type='''learned_range''' )
lowerCamelCase__ = scheduler_class(**__lowerCAmelCase )
lowerCamelCase__ = 0.5
assert scheduler._get_variance(1 , predicted_variance=__lowerCAmelCase ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(4_8_7 , predicted_variance=__lowerCAmelCase ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(9_9_9 , predicted_variance=__lowerCAmelCase ) - -0.001_0011 < 1E-5
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**__lowerCAmelCase )
lowerCamelCase__ = scheduler.timesteps
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter
lowerCamelCase__ = torch.manual_seed(0 )
for i, t in enumerate(__lowerCAmelCase ):
# 1. predict noise residual
lowerCamelCase__ = model(__lowerCAmelCase , __lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
lowerCamelCase__ = pred_prev_sample
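            # the step output becomes the sample fed into the next timestep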
lowerCamelCase__ = torch.sum(torch.abs(__lowerCAmelCase ) )
lowerCamelCase__ = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(2_5 )
lowerCamelCase__ = scheduler.timesteps
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter
lowerCamelCase__ = torch.manual_seed(0 )
for i, t in enumerate(__lowerCAmelCase ):
# 1. predict noise residual
lowerCamelCase__ = model(__lowerCAmelCase , __lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
lowerCamelCase__ = None
else:
lowerCamelCase__ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowerCamelCase__ = scheduler.step(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , prev_timestep=__lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
lowerCamelCase__ = pred_prev_sample
lowerCamelCase__ = torch.sum(torch.abs(__lowerCAmelCase ) )
lowerCamelCase__ = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
| 209 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
__lowercase = logging.get_logger(__name__)
__lowercase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
__lowercase = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
__lowercase = {F'funnel-transformer/{name}': 512 for name in _model_names}
__lowercase = {F'funnel-transformer/{name}': {'''do_lower_case''': True} for name in _model_names}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : List[str] = VOCAB_FILES_NAMES
a__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
a__ : Any = PRETRAINED_INIT_CONFIGURATION
a__ : List[Any] = FunnelTokenizer
a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : int = 2
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=True , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase="<s>" , __lowercase="</s>" , __lowercase=True , __lowercase=True , __lowercase=None , __lowercase="##" , **__lowercase , ) -> Tuple:
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , clean_text=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , wordpieces_prefix=__lowercase , **__lowercase , )
__UpperCamelCase :int = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('''lowercase''' , __lowercase) != do_lower_case
or normalizer_state.get('''strip_accents''' , __lowercase) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __lowercase) != tokenize_chinese_chars
):
__UpperCamelCase :Optional[int] = getattr(__lowercase , normalizer_state.pop('''type'''))
__UpperCamelCase :str = do_lower_case
__UpperCamelCase :Dict = strip_accents
__UpperCamelCase :Optional[int] = tokenize_chinese_chars
__UpperCamelCase :Union[str, Any] = normalizer_class(**__lowercase)
__UpperCamelCase :int = do_lower_case
def UpperCamelCase__ ( self , __lowercase , __lowercase=None) -> Any:
__UpperCamelCase :List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]:
__UpperCamelCase :Optional[int] = [self.sep_token_id]
__UpperCamelCase :List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0]
return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
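    # Note that Funnel gives [CLS] its own segment id (self.cls_token_type_id = 2) in the
    # token_type_ids built above, while ordinary first-segment tokens use 0.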
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
__UpperCamelCase :Any = self._tokenizer.model.save(__lowercase , name=__lowercase)
return tuple(__lowercase)
| 105 | from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
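# When an optional dependency is missing, the dummy-object module substitutes stand-ins
# that only raise an informative ImportError once they are actually used.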
| 105 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowerCAmelCase__ : str = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : int = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowerCAmelCase__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
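# _LazyModule defers the heavy imports declared above until an attribute is first
# accessed, keeping top-level package import cheap.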
| 143 |
'''simple docstring'''
def _lowercase(arr, required_sum):
    '''simple docstring'''
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element,
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # a non-zero sum cannot be formed from the empty set, hence False
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
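# subset[i][j] is True iff some subset of the first i values sums to j: either skip
# arr[i - 1], or include it and check subset[i - 1][j - arr[i - 1]].
# e.g. _lowercase([2, 4, 6, 8], 5) is False (all values are even), while
# _lowercase([2, 4, 6, 8], 14) is True (2 + 4 + 8).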
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 0 |
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    '''simple docstring'''
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    '''simple docstring'''
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swap adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
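# insert_next walks an out-of-order element toward the end one swap at a time;
# rec_insertion_sort then recurses on the first n - 1 items, leaving the list sorted.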
if __name__ == "__main__":
    numbers = input('''Enter integers separated by spaces: ''')
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 370 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
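# enable_full_determinism() forces deterministic torch kernels and seeding so the
# numeric slice assertions below are reproducible across runs.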
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] =KandinskyVaaImgaImgPipeline
a : Optional[int] =["image_embeds", "negative_image_embeds", "image"]
a : Optional[int] =[
"image_embeds",
"negative_image_embeds",
"image",
]
a : str =[
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
a : Dict =False
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self ):
"""simple docstring"""
return 100
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : List[str] = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCAmelCase : int = UNetaDConditionModel(**snake_case__ )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.dummy_unet
lowerCAmelCase : Optional[int] = self.dummy_movq
lowerCAmelCase : List[str] = {
"num_train_timesteps": 1_000,
"beta_schedule": "linear",
"beta_start": 0.00085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
lowerCAmelCase : Tuple = DDIMScheduler(**snake_case__ )
lowerCAmelCase : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowercase__ ( self , snake_case__ , snake_case__=0 ):
"""simple docstring"""
lowerCAmelCase : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowerCAmelCase : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create init_image
lowerCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowerCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase : List[str] = Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" ).resize((256, 256) )
if str(snake_case__ ).startswith("mps" ):
lowerCAmelCase : Optional[int] = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase : Optional[int] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : List[str] = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = "cpu"
lowerCAmelCase : Dict = self.get_dummy_components()
lowerCAmelCase : Union[str, Any] = self.pipeline_class(**snake_case__ )
lowerCAmelCase : Optional[int] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : int = pipe(**self.get_dummy_inputs(snake_case__ ) )
lowerCAmelCase : Union[str, Any] = output.images
lowerCAmelCase : Union[str, Any] = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : int = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy" )
lowerCAmelCase : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
lowerCAmelCase : Optional[Any] = "A red cartoon frog, 4k"
lowerCAmelCase : int = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
lowerCAmelCase : List[Any] = KandinskyVaaImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
lowerCAmelCase : Tuple = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : str = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCAmelCase , lowerCAmelCase : Optional[Any] = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
lowerCAmelCase : Tuple = pipeline(
image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
lowerCAmelCase : Optional[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 133 | 0 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowerCamelCase__ = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
lowerCamelCase__ = {"""facebook/blenderbot_small-90M""": 512}
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : str = set()
__lowerCAmelCase : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowerCAmelCase : Tuple = char
__lowerCAmelCase : List[str] = set(_UpperCamelCase )
return pairs
class A__ ( _lowerCamelCase):
A_ : Dict = VOCAB_FILES_NAMES
A_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Tuple = ['input_ids', 'attention_mask']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="__start__" , _SCREAMING_SNAKE_CASE="__end__" , _SCREAMING_SNAKE_CASE="__unk__" , _SCREAMING_SNAKE_CASE="__null__" , **_SCREAMING_SNAKE_CASE , ):
super().__init__(unk_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as vocab_handle:
__lowerCAmelCase : Optional[Any] = json.load(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = {v: k for k, v in self.encoder.items()}
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as merges_handle:
__lowerCAmelCase : Tuple = merges_handle.read().split('\n' )[1:-1]
__lowerCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in merges]
__lowerCAmelCase : Union[str, Any] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
__lowerCAmelCase : Any = {}
@property
def __lowerCamelCase ( self ):
return len(self.encoder )
def __lowerCamelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if token in self.cache:
return self.cache[token]
__lowerCAmelCase : Tuple = re.sub('([.,!?()])' , R' \1' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = re.sub('(\')' , R' \1 ' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = re.sub(R'\s{2,}' , ' ' , _SCREAMING_SNAKE_CASE )
if "\n" in token:
__lowerCAmelCase : Optional[int] = token.replace('\n' , ' __newln__' )
__lowerCAmelCase : Optional[int] = token.split(' ' )
__lowerCAmelCase : Optional[Any] = []
for token in tokens:
if not len(_SCREAMING_SNAKE_CASE ):
continue
__lowerCAmelCase : Optional[int] = token.lower()
__lowerCAmelCase : List[Any] = tuple(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
__lowerCAmelCase : List[str] = get_pairs(_SCREAMING_SNAKE_CASE )
if not pairs:
words.append(_SCREAMING_SNAKE_CASE )
continue
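            # Greedily apply the lowest-ranked (earliest-learned) merge until no known
            # pair remains in this word.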
while True:
__lowerCAmelCase : Tuple = min(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : self.bpe_ranks.get(_SCREAMING_SNAKE_CASE , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCAmelCase , __lowerCAmelCase : List[Any] = bigram
__lowerCAmelCase : List[str] = []
__lowerCAmelCase : str = 0
while i < len(_SCREAMING_SNAKE_CASE ):
try:
__lowerCAmelCase : int = word.index(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
new_word.extend(word[i:j] )
__lowerCAmelCase : str = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(_SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCAmelCase : Tuple = tuple(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = new_word
if len(_SCREAMING_SNAKE_CASE ) == 1:
break
else:
__lowerCAmelCase : int = get_pairs(_SCREAMING_SNAKE_CASE )
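            # Join pieces with the "@@ " continuation marker, then strip the trailing "</w>".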
__lowerCAmelCase : Dict = '@@ '.join(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = word[:-4]
__lowerCAmelCase : Dict = word
words.append(_SCREAMING_SNAKE_CASE )
return " ".join(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = []
__lowerCAmelCase : Optional[int] = re.findall(R'\S+\n?' , _SCREAMING_SNAKE_CASE )
for token in words:
split_tokens.extend(list(self.bpe(_SCREAMING_SNAKE_CASE ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = token.lower()
return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return self.decoder.get(_SCREAMING_SNAKE_CASE , self.unk_token )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = ' '.join(_SCREAMING_SNAKE_CASE ).replace('@@ ' , '' ).strip()
return out_string
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__lowerCAmelCase : Tuple = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__lowerCAmelCase : Optional[Any] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE ) + '\n' )
__lowerCAmelCase : Union[str, Any] = 0
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _SCREAMING_SNAKE_CASE : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
' Please check that the tokenizer is not corrupted!' )
__lowerCAmelCase : Dict = token_index
writer.write(' '.join(_SCREAMING_SNAKE_CASE ) + '\n' )
index += 1
return vocab_file, merge_file | 86 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class A__ ( unittest.TestCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=4_00 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 2_55 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : int = batch_size
__lowerCAmelCase : str = num_channels
__lowerCAmelCase : Optional[int] = min_resolution
__lowerCAmelCase : List[Any] = max_resolution
__lowerCAmelCase : Union[str, Any] = do_resize
__lowerCAmelCase : Optional[Any] = size
__lowerCAmelCase : Dict = do_rescale
__lowerCAmelCase : Optional[Any] = rescale_factor
__lowerCAmelCase : Any = do_normalize
__lowerCAmelCase : List[str] = image_mean
__lowerCAmelCase : Union[str, Any] = image_std
__lowerCAmelCase : Optional[int] = do_pad
def __lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
if not batched:
__lowerCAmelCase : str = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = image.size
else:
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = image.shape[1], image.shape[2]
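            # Mirror DETR's shortest-edge resize: scale so the shorter side equals
            # size["shortest_edge"] while preserving the aspect ratio.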
if w < h:
__lowerCAmelCase : str = int(self.size['shortest_edge'] * h / w )
__lowerCAmelCase : Optional[int] = self.size['shortest_edge']
elif w > h:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Optional[Any] = self.size['shortest_edge']
else:
__lowerCAmelCase : str = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCAmelCase : Any = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
__lowerCAmelCase : Dict = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : List[str] = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'rescale_factor' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase : int = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Tuple = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Any = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
# prepare image and target
__lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Any = json.loads(f.read() )
__lowerCAmelCase : Tuple = {'image_id': 3_97_69, 'annotations': target}
# encode them
__lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
__lowerCAmelCase : int = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : Dict = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
__lowerCAmelCase : int = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
def __lowerCamelCase ( self ):
# prepare image, target and masks_path
__lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Optional[int] = json.loads(f.read() )
__lowerCAmelCase : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
__lowerCAmelCase : Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__lowerCAmelCase : Optional[int] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
__lowerCAmelCase : Optional[Any] = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : str = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
__lowerCAmelCase : Dict = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
__lowerCAmelCase : str = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) | 86 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__snake_case = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
__snake_case = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
__snake_case = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : Union[str, Any] = VOCAB_FILES_NAMES
A_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : int = PRETRAINED_INIT_CONFIGURATION
A_ : Dict = RoFormerTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Dict:
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
_a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , _A ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , _A ) != strip_accents
):
_a = getattr(_A , pre_tok_state.pop('''type''' ) )
_a = do_lower_case
_a = strip_accents
_a = pre_tok_class(**_A )
_a = do_lower_case
def __getstate__( self ) -> Dict:
_a = self.__dict__.copy()
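        # PreTokenizer.custom wrappers such as JiebaPreTokenizer are not picklable, so a
        # plain BertPreTokenizer is swapped in here and the real one restored in __setstate__.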
_a = BertPreTokenizer()
return state
def __setstate__( self , __UpperCAmelCase ) -> Dict:
_a = d
_a = self.__dict__['_tokenizer'].get_vocab()
_a = PreTokenizer.custom(JiebaPreTokenizer(_A ) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None ) -> str:
_a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Optional[Any]:
_a = BertPreTokenizer()
return super().save_pretrained(_A , _A , _A , _A , **_A ) | 357 |
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data['''data'''])
y = np.array(data['''target'''])
classes = data['''target_names''']

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """simple docstring"""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """simple docstring"""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
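# sorted() orders the (distance, label) tuples by distance first, so the [:k] slice
# picks the k nearest neighbours before the majority vote.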
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 153 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
lowerCamelCase__ : Optional[int] = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 225 |
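# Two integers have different signs exactly when their XOR is negative (the sign bits differ).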
def snake_case_(num1, num2) -> bool:
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 196 | 0 |
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
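# Average-case O(n log n); already-sorted input degenerates the BST into a chain, giving O(n^2).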
| 327 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = TFCamembertModel.from_pretrained('jplu/tf-camembert-base')
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
SCREAMING_SNAKE_CASE = model(a)['last_hidden_state']
SCREAMING_SNAKE_CASE = tf.TensorShape((1, 10, 768))
self.assertEqual(output.shape , a)
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
| 327 | 1 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCAmelCase_() -> None:
    """simple docstring"""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjancency = defaultdict(list)
    for node1, node2, cost in edges:
        adjancency[node1].append([node2, cost])
        adjancency[node2].append([node1, cost])
    result = mst(adjancency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
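    # The graph is undirected, so each expected edge may come back reversed; the
    # expected tree's total weight is 37 (1 + 2 + 2 + 4 + 4 + 7 + 8 + 9).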
| 10 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
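# Under TYPE_CHECKING the concrete symbols are imported for static type checkers only;
# at runtime the _LazyModule below resolves them on first attribute access.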
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 1 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = (IPNDMScheduler,)
lowerCAmelCase = (('''num_inference_steps''', 50),)
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ : Tuple = {'''num_train_timesteps''': 1_000}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def a__ ( self ,_SCREAMING_SNAKE_CASE=0 ,**_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : int = kwargs.pop('''num_inference_steps''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = self.dummy_sample
UpperCAmelCase_ : Optional[Any] = 0.1 * sample
UpperCAmelCase_ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
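        # Fabricated residual history: IPNDM is a fourth-order multistep scheduler, so
        # save/load must round-trip the last few model-output residuals.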
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Tuple = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:]
if time_step is None:
UpperCAmelCase_ : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
UpperCAmelCase_ : Optional[int] = dummy_past_residuals[:]
UpperCAmelCase_ : List[Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : str = new_scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : Union[str, Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Optional[int] = new_scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a__ ( self ) -> List[Any]:
pass
def a__ ( self ,_SCREAMING_SNAKE_CASE=0 ,**_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : List[str] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Tuple = kwargs.pop('''num_inference_steps''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = self.dummy_sample
UpperCAmelCase_ : Tuple = 0.1 * sample
UpperCAmelCase_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : str = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : Tuple = dummy_past_residuals[:]
if time_step is None:
UpperCAmelCase_ : Dict = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ : str = dummy_past_residuals[:]
UpperCAmelCase_ : Tuple = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Optional[int] = new_scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : str = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : List[str] = new_scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : Tuple = self.scheduler_classes[0]
UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = 10
UpperCAmelCase_ : Tuple = self.dummy_model()
UpperCAmelCase_ : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ : int = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ : Optional[int] = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).prev_sample
return sample
def a__ ( self ) -> str:
UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Any = kwargs.pop('''num_inference_steps''' ,_SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : int = self.get_scheduler_config()
UpperCAmelCase_ : List[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = self.dummy_sample
UpperCAmelCase_ : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(_SCREAMING_SNAKE_CASE ,'''set_timesteps''' ):
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(_SCREAMING_SNAKE_CASE ,'''set_timesteps''' ):
UpperCAmelCase_ : Union[str, Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ : int = dummy_past_residuals[:]
UpperCAmelCase_ : List[Any] = scheduler.timesteps[5]
UpperCAmelCase_ : Dict = scheduler.timesteps[6]
UpperCAmelCase_ : List[Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Dict = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
UpperCAmelCase_ : List[Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Dict = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def a__ ( self ) -> Any:
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE ,time_step=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=_SCREAMING_SNAKE_CASE ,time_step=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = self.full_loop()
UpperCAmelCase_ : str = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 2_540_529 ) < 10 | 356 |
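The assertions above drive IPNDMScheduler exactly the way a pipeline would. Stripped of the test harness, the basic loop looks like this — a minimal sketch in which a dummy residual stands in for a real denoising model:

import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)  # stand-in latent
for t in scheduler.timesteps:
    residual = 0.1 * sample  # a real pipeline would call its UNet here
    sample = scheduler.step(residual, t, sample).prev_sample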
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, src, dest):
    val = state_dict.pop(src)
    state_dict[dest] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 235 | 0 |
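The trickiest step in the script above is read_in_q_k_v: PyTorch's nn.MultiheadAttention stores query/key/value as a single fused in_proj matrix, while the HF model expects three separate projections. A minimal illustration of the slicing with random weights (hidden size 256, as in this script):

import torch

hidden_size = 256
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused QKV weight
q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_w = in_proj_weight[-hidden_size:, :]
assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)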
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity of two sets (or lists/tuples)."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union

        union = set_a + [element for element in set_b if element not in set_a]
        return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
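For the two sets above, the intersection {'c', 'd', 'e'} has size 3 and the union has size 8, so the script prints 0.375. With the alternative (non-deduplicating) union the denominator is 5 + 6 = 11 instead — run with the module above loaded:

print(jaccard_similarity(set_a, set_b, alternative_union=True))  # 3 / 11 ≈ 0.2727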
| 80 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c) ** 2 for c in str(i)) for i in range(100_000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.

# The other one ends with 1 and has only one element 1.

# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1
CHAINS[57] = False  # the chain starting at 58, which ends at 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # multiplying by 10 appends a zero digit, which leaves the digit-square sum
    # unchanged, so those entries can be cached for free
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below `number` end their chain at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
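A quick hand-check of the chain logic (digit_square_sum here is a hypothetical helper, equivalent to next_number without the 5-digit lookup table): 44 → 32 → 13 → 10 → 1, while 85 → 89 → 145 → 42 → 20 → 4 → 16 → 37 → 58 → 89 loops forever at 89:

def digit_square_sum(n: int) -> int:
    return sum(int(d) ** 2 for d in str(n))

n = 44
while n not in (1, 89):
    n = digit_square_sum(n)
print(n)  # 1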
| 80 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
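All of these __init__ files follow the same lazy-import pattern: _import_structure maps submodule names to their public symbols, and the module object in sys.modules is swapped for a proxy that only imports a submodule when one of its attributes is first touched. A simplified sketch of the idea (illustrative only; transformers' real _LazyModule does more bookkeeping):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each public attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        # the heavy import happens only on first attribute access
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)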
| 291 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
a_ = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
a_ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
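A quick sanity check of the counting step (hypothetical input): "Hello World" upper-cases to HELLO WORLD, so get_letter_count returns 3 for L, 2 for O, and 1 each for H, E, W, R and D — run with the module above loaded:

counts = get_letter_count("Hello World")
assert counts["L"] == 3 and counts["O"] == 2 and counts["H"] == 1

english_freq_match_score builds on this: it returns an integer from 0 to 12, counting how many of English's six most and six least common letters land in the matching ends of the message's frequency order, so higher scores suggest English-like text.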
| 291 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0

        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0

        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
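A quick check of the recursion (hypothetical usage): for the three-node tree with coins [0, 3, 0], the answer is 3 moves — one coin goes left child → root, another goes left child → root → right child. With the module above loaded:

tree = TreeNode(0, TreeNode(3), TreeNode(0))
print(distribute_coins(tree))  # 3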
| 41 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
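Example usage on a small binary grid where 1 marks a walkable cell (hypothetical input; the zeros force the path through the middle column). With the module above loaded:

grid = np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]])
dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
print(dist, path)  # 4.0 [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]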
| 41 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 217 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 217 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f'Got duplicate yaml keys: {duplicate_keys}')

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == '---' and '---' in full_content[1:]:
        sep_idx = full_content[1:].index('---') + 1
        yamlblock = '\n'.join(full_content[1:sep_idx])
        return yamlblock, '\n'.join(full_content[sep_idx + 1 :])

    return None, '\n'.join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {'train_eval_index'}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding='utf-8') as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding='utf-8') as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, 'w', encoding='utf-8') as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = '---\n' + self.to_yaml_string() + '---\n' + content
        else:
            full_content = '---\n' + self.to_yaml_string() + '---\n'
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace('-', '_') if key.replace('-', '_') in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace('_', '-') if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding='utf-8',
        ).decode('utf-8')
known_task_ids = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
    ap.add_argument('readme_filepath')

    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
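The front-matter handling above keys off a leading '---' fence; a quick demonstration of _split_yaml_from_readme on a hypothetical README, with the module above loaded:

readme = "---\npretty_name: Demo\n---\n# My dataset\nBody text."
yaml_block, content = _split_yaml_from_readme(readme)
assert yaml_block == "pretty_name: Demo"
assert content == "# My dataset\nBody text."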
| 199 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 87 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : str = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = ["CLIPFeatureExtractor"]
_lowerCamelCase : List[str] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 365 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case (_a , _a , unittest.TestCase ):
lowerCAmelCase__ = IFPipeline
lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return self._get_dummy_components()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str]=0 ) -> Optional[Any]:
'''simple docstring'''
if str(_UpperCAmelCase ).startswith("""mps""" ):
_lowerCAmelCase : Tuple = torch.manual_seed(_UpperCAmelCase )
else:
_lowerCAmelCase : str = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_lowerCAmelCase : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def SCREAMING_SNAKE_CASE ( self : int ) -> str:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class __snake_case (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
'''simple docstring'''
_lowerCAmelCase : str = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
_lowerCAmelCase : Tuple = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
_lowerCAmelCase , _lowerCAmelCase : Tuple = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[int] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase : Optional[int] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase : List[Any] = IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase : Optional[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Any ) -> Any:
'''simple docstring'''
_start_torch_memory_measurement()
_lowerCAmelCase : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_lowerCAmelCase : List[str] = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_start_torch_memory_measurement()
_lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase : List[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_lowerCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_lowerCAmelCase : List[str] = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase : List[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict ) -> List[Any]:
'''simple docstring'''
_start_torch_memory_measurement()
_lowerCAmelCase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_UpperCAmelCase )
_lowerCAmelCase : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_lowerCAmelCase : Tuple = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_lowerCAmelCase : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_UpperCAmelCase )
_lowerCAmelCase : Optional[int] = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase : Dict = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ():
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
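Each sub-test above brackets its pipeline call with this reset so torch.cuda.max_memory_allocated() reports a per-stage peak rather than a running maximum. The measurement pattern in isolation (requires a CUDA device; the tensor is a placeholder workload):

import torch

torch.cuda.reset_peak_memory_stats()
x = torch.randn(1024, 1024, device="cuda")
print(torch.cuda.max_memory_allocated() / 2**20, "MiB peak")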
| 159 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 176 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class lowercase__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , unk_token='<unk>' )
        tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = 'this is a test'
SCREAMING_SNAKE_CASE__ = 'this is a test'
return input_text, output_text
def A_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = '<pad>'
SCREAMING_SNAKE_CASE__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '[PAD]' )
self.assertEqual(len(UpperCAmelCase_ ) , 30001 )
def A_ ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def A_ ( self : Optional[Any] ):
# fmt: off
SCREAMING_SNAKE_CASE__ = ' \tHeLLo!how \n Are yoU? '
SCREAMING_SNAKE_CASE__ = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def A_ ( self : Any ):
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def A_ ( self : Tuple ):
pass
def A_ ( self : List[str] ):
# fmt: off
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : List[str] ):
# fmt: off
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : int ):
# fmt: off
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Tuple ):
# fmt: off
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Any ):
# fmt: off
SCREAMING_SNAKE_CASE__ = ' \tHeLLo!how \n Are yoU? '
SCREAMING_SNAKE_CASE__ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : int ):
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = tokenizer.encode(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = 'This is a test'
SCREAMING_SNAKE_CASE__ = [13, 1, 4398, 25, 21, 1289]
SCREAMING_SNAKE_CASE__ = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
SCREAMING_SNAKE_CASE__ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# fmt: off
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
SCREAMING_SNAKE_CASE__ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
SCREAMING_SNAKE_CASE__ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
SCREAMING_SNAKE_CASE__ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.encode('sequence builders' )
SCREAMING_SNAKE_CASE__ = tokenizer.encode('multi-sequence build' )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCAmelCase_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCAmelCase_ , )
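    # For reference, the layouts asserted above (standard CLS/SEP convention):
    #   single sequence: [CLS] tokens_a [SEP]
    #   pair:            [CLS] tokens_a [SEP] tokens_b [SEP]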
@slow
def A_ ( self : Optional[Any] ):
# fmt: off
SCREAMING_SNAKE_CASE__ = {'input_ids': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
| 176 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester ( unittest.TestCase ):
def __init__( self, UpperCamelCase__, UpperCamelCase__=7, UpperCamelCase__=3, UpperCamelCase__=18, UpperCamelCase__=30, UpperCamelCase__=400, UpperCamelCase__=True, UpperCamelCase__=None, UpperCamelCase__=True, UpperCamelCase__=None, UpperCamelCase__=True, UpperCamelCase__=[0.5, 0.5, 0.5], UpperCamelCase__=[0.5, 0.5, 0.5], ):
"""simple docstring"""
lowerCAmelCase_ = size if size is not None else {'''shortest_edge''': 18}
lowerCAmelCase_ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = image_size
lowerCAmelCase_ = min_resolution
lowerCAmelCase_ = max_resolution
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = do_center_crop
lowerCAmelCase_ = crop_size
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean
lowerCAmelCase_ = image_std
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = LevitImageProcessingTester(self )
@property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__, '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_center_crop''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''size''' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, Image.Image )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, np.ndarray )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, torch.Tensor )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
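# Usage sketch outside the test harness (hypothetical sizes): the processor
# resizes the shortest edge to 18 px, center-crops to 18x18, then normalizes.
#   processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
#   pixel_values = processor(images=[Image.new("RGB", (32, 48))], return_tensors="pt").pixel_values
#   pixel_values.shape  # -> torch.Size([1, 3, 18, 18])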
| 167 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester ( unittest.TestCase ):
def __init__( self, UpperCamelCase__, UpperCamelCase__=7, UpperCamelCase__=3, UpperCamelCase__=30, UpperCamelCase__=400, UpperCamelCase__=True, UpperCamelCase__=None, UpperCamelCase__=True, UpperCamelCase__=[0.5, 0.5, 0.5], UpperCamelCase__=[0.5, 0.5, 0.5], UpperCamelCase__=True, UpperCamelCase__=1 / 255, UpperCamelCase__=True, ):
"""simple docstring"""
lowerCAmelCase_ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = min_resolution
lowerCAmelCase_ = max_resolution
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean
lowerCAmelCase_ = image_std
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = rescale_factor
lowerCAmelCase_ = do_pad
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                w , h = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
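    # Worked example of the resize rule above: with shortest_edge=18, a
    # 30x40 (w x h) input maps to expected_width=18 and
    # expected_height=int(18 * 40 / 30)=24 -- the shorter side is scaled to 18
    # and the longer side keeps the aspect ratio.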
@require_torch
@require_vision
class DeformableDetrImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = DeformableDetrImageProcessingTester(self )
@property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__, '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_rescale''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_pad''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''size''' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad, UpperCamelCase__ )
lowerCAmelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=UpperCamelCase__ )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad, UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, Image.Image )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ )
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, np.ndarray )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, torch.Tensor )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''', '''r''' ) as f:
lowerCAmelCase_ = json.loads(f.read() )
lowerCAmelCase_ = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
lowerCAmelCase_ = DeformableDetrImageProcessor()
lowerCAmelCase_ = image_processing(images=UpperCamelCase__, annotations=UpperCamelCase__, return_tensors='''pt''' )
# verify pixel values
lowerCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], UpperCamelCase__, atol=1E-4 ) )
# verify area
lowerCAmelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], UpperCamelCase__ ) )
# verify boxes
lowerCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], UpperCamelCase__, atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], UpperCamelCase__ ) )
# verify is_crowd
lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], UpperCamelCase__ ) )
# verify class_labels
lowerCAmelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], UpperCamelCase__ ) )
# verify orig_size
lowerCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], UpperCamelCase__ ) )
# verify size
lowerCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], UpperCamelCase__ ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''', '''r''' ) as f:
lowerCAmelCase_ = json.loads(f.read() )
lowerCAmelCase_ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
lowerCAmelCase_ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowerCAmelCase_ = DeformableDetrImageProcessor(format='''coco_panoptic''' )
lowerCAmelCase_ = image_processing(images=UpperCamelCase__, annotations=UpperCamelCase__, masks_path=UpperCamelCase__, return_tensors='''pt''' )
# verify pixel values
lowerCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], UpperCamelCase__, atol=1E-4 ) )
# verify area
lowerCAmelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], UpperCamelCase__ ) )
# verify boxes
lowerCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], UpperCamelCase__, atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], UpperCamelCase__ ) )
# verify is_crowd
lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], UpperCamelCase__ ) )
# verify class_labels
lowerCAmelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], UpperCamelCase__ ) )
# verify masks
lowerCAmelCase_ = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item(), UpperCamelCase__ )
# verify orig_size
lowerCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], UpperCamelCase__ ) )
# verify size
lowerCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], UpperCamelCase__ ) )
| 167 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __SCREAMING_SNAKE_CASE ( unittest.TestCase , ToolTesterMixin ):
    def setup( self ):
        self.tool = load_tool('''text-classification''' )
        self.tool.setup()
        self.remote_tool = load_tool('''text-classification''' , remote=True )

    def test_exact_match_arg( self ):
        result = self.tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
        self.assertEqual(result , '''positive''' )

    def test_exact_match_arg_remote( self ):
        result = self.remote_tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
        self.assertEqual(result , '''positive''' )

    def test_exact_match_kwarg( self ):
        result = self.tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
        self.assertEqual(result , '''positive''' )

    def test_exact_match_kwarg_remote( self ):
        result = self.remote_tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
        self.assertEqual(result , '''positive''' )
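# Usage sketch: the tool maps free text onto whichever candidate label scores
# highest under zero-shot classification (names as used above).
#   tool = load_tool("text-classification")
#   tool.setup()
#   tool("That's quite cool", ["positive", "negative"])  # -> "positive"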
| 337 |
from math import factorial
def solution(n = 100):
    return sum(int(x) for x in str(factorial(n)))
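# Worked example: factorial(10) == 3628800, whose digit sum is
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10) returns 27.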
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 137 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class __A( PretrainedConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """gpt_neox_japanese"""
    def __init__(self , vocab_size=3_20_00 , hidden_size=25_60 , num_hidden_layers=32 , num_attention_heads=32 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.00 , rotary_emb_base=1_00_00 , max_position_embeddings=20_48 , initializer_range=0.02 , layer_norm_eps=1E-5 , use_cache=True , bos_token_id=3_19_96 , eos_token_id=3_19_99 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
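# Usage sketch (attribute names as assigned above): the defaults mirror the
# released abeja/gpt-neox-japanese-2.7b checkpoint.
#   config = __A()
#   (config.hidden_size, config.num_hidden_layers)  # -> (2560, 32)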
| 353 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __A( ChunkPipeline ):
"""simple docstring"""
    def __init__(self , **kwargs ):
        super().__init__(**kwargs )
        requires_backends(self , """vision""" )
        requires_backends(self , """torch""" )
        if self.framework != "pt":
            raise ValueError(F"The {self.__class__} is only available in PyTorch." )
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )
    def _sanitize_parameters(self , **kwargs ):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["""points_per_batch"""] = kwargs["""points_per_batch"""]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["""points_per_crop"""] = kwargs["""points_per_crop"""]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["""crops_n_layers"""] = kwargs["""crops_n_layers"""]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["""crop_overlap_ratio"""] = kwargs["""crop_overlap_ratio"""]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["""crop_n_points_downscale_factor"""] = kwargs["""crop_n_points_downscale_factor"""]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["""pred_iou_thresh"""] = kwargs["""pred_iou_thresh"""]
        if "stability_score_offset" in kwargs:
            forward_params["""stability_score_offset"""] = kwargs["""stability_score_offset"""]
        if "mask_threshold" in kwargs:
            forward_params["""mask_threshold"""] = kwargs["""mask_threshold"""]
        if "stability_score_thresh" in kwargs:
            forward_params["""stability_score_thresh"""] = kwargs["""stability_score_thresh"""]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["""crops_nms_thresh"""] = kwargs["""crops_nms_thresh"""]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["""output_rle_mask"""] = kwargs["""output_rle_mask"""]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["""output_bboxes_mask"""] = kwargs["""output_bboxes_mask"""]
        return preprocess_kwargs, forward_params, postprocess_kwargs
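    # The three dicts returned above are routed by the ChunkPipeline machinery
    # to preprocess(), _forward() and postprocess() respectively, so each stage
    # only ever sees the keyword arguments it declared.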
    def __call__(self , image , *args , num_workers=None , batch_size=None , **kwargs ):
        return super().__call__(image , *args , num_workers=num_workers , batch_size=batch_size , **kwargs )
    def preprocess(self , image , points_per_batch=64 , crops_n_layers = 0 , crop_overlap_ratio = 5_12 / 15_00 , points_per_crop = 32 , crop_n_points_downscale_factor = 1 , ):
        image = load_image(image )
        target_size = self.image_processor.size["""longest_edge"""]
        crop_boxes , grid_points , cropped_images , input_labels = self.image_processor.generate_crop_boxes(
            image , target_size , crops_n_layers , crop_overlap_ratio , points_per_crop , crop_n_points_downscale_factor )
        model_inputs = self.image_processor(images=cropped_images , return_tensors="""pt""" )
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs , device=self.device )
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
                    model_inputs["""image_embeddings"""] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
                """To return all points at once, set points_per_batch to None""" )
        for i in range(0 , n_points , points_per_batch ):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
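        # Each yielded chunk carries at most `points_per_batch` prompt points
        # plus the shared image embeddings, so peak GPU memory stays bounded no
        # matter how many points the crop grid produces.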
    def _forward(self , model_inputs , pred_iou_thresh=0.88 , stability_score_thresh=0.95 , mask_threshold=0 , stability_score_offset=1 , ):
        input_boxes = model_inputs.pop("""input_boxes""" )
        is_last = model_inputs.pop("""is_last""" )
        original_sizes = model_inputs.pop("""original_sizes""" ).tolist()
        reshaped_input_sizes = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
        model_outputs = self.model(**model_inputs )
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["""pred_masks"""]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks , original_sizes , reshaped_input_sizes , mask_threshold , binarize=False )
        iou_scores = model_outputs["""iou_scores"""]
        masks , iou_scores , boxes = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , pred_iou_thresh , stability_score_thresh , mask_threshold , stability_score_offset , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
    def postprocess(self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("""iou_scores""" ) )
            all_masks.extend(model_output.pop("""masks""" ) )
            all_boxes.append(model_output.pop("""boxes""" ) )
        all_scores = torch.cat(all_scores )
        all_boxes = torch.cat(all_boxes )
        output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks , all_scores , all_boxes , crops_nms_thresh )
        extra = defaultdict(list )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v )
        optional = {}
        if output_rle_mask:
            optional["""rle_mask"""] = rle_mask
        if output_bboxes_mask:
            optional["""bounding_boxes"""] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 178 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class lowercase ( PretrainedConfig ):
    model_type = '''upernet'''
    def __init__( self , backbone_config=None , hidden_size=5_1_2 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=3_8_4 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=2_5_5 , **kwargs , ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
            backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''])
        elif isinstance(backbone_config ,dict):
            backbone_model_type = backbone_config.get('''model_type''')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__)
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
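# Usage sketch: with no backbone_config the __init__ above falls back to a
# ResNet backbone exposing stage1..stage4 (class name as defined above).
#   config = lowercase()
#   config.backbone_config.model_type  # -> "resnet"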
| 101 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask( size , overlap_pixels , remove_borders=[] ):
    """simple docstring"""
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x) , dtype=np.uint8 ) * 2_55
    mask = np.pad(mask , mode="""linear_ramp""" , pad_width=overlap_pixels , end_values=0 )
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
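# The np.pad(..., mode="linear_ramp", end_values=0) call above surrounds the
# solid 255 core with borders that fade linearly to 0, so overlapping tiles
# alpha-blend into their neighbours instead of leaving visible seams; borders
# listed in remove_borders stay fully opaque right up to the image edge.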
def clamp( n , smallest , largest ):
    """simple docstring"""
    return max(smallest , min(n , largest ) )
def clamp_rect( rect , min , max ):
    """simple docstring"""
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def add_overlap_rect( rect , overlap , image_size ):
    """simple docstring"""
    rect = list(rect )
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect , [0, 0] , [image_size[0], image_size[1]] )
    return rect
def squeeze_tile( tile , original_image , original_slice , slice_x ):
    """simple docstring"""
    result = Image.new("""RGB""" , (tile.size[0] + original_slice, tile.size[1]) )
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
    result.paste(tile , (original_slice, 0) )
    return result
def unsqueeze_tile( tile , original_image_slice ):
    """simple docstring"""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect )
    return tile
def next_divisible( n , d ):
    """simple docstring"""
    divisor = n % d
    return n - divisor
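# Worked example: next_divisible(70, 32) -> 70 - (70 % 32) = 64, i.e. the
# largest multiple of d that does not exceed n.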
class StableDiffusionTiledUpscalePipeline( StableDiffusionUpscalePipeline ):
    def __init__( self , vae: AutoencoderKL , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , unet: UNet2DConditionModel , low_res_scheduler: DDPMScheduler , scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , max_noise_level: int = 3_50 , ):
        super().__init__(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , max_noise_level=max_noise_level , )
    def _process_tile( self , original_image_slice , x , y , tile_size , tile_border , image , final_image , **kwargs ):
        torch.manual_seed(0 )
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
            min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
            min(image.size[0] , (x + 1) * tile_size ),
            min(image.size[1] , (y + 1) * tile_size ),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect , tile_border , image.size )
        tile = image.crop(crop_rect_with_overlap )
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0 , translated_slice_x )
        to_input = squeeze_tile(tile , image , original_image_slice , translated_slice_x )
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline , self ).__call__(image=to_input , **kwargs ).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
        upscaled_tile = unsqueeze_tile(upscaled_tile , original_image_slice )
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
        remove_borders = []
        if x == 0:
            remove_borders.append("""l""" )
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("""r""" )
        if y == 0:
            remove_borders.append("""t""" )
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("""b""" )
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=remove_borders ) , mode="""L""" , )
        final_image.paste(
            upscaled_tile , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , transparency_mask )
@torch.no_grad()
    def __call__( self , prompt , image , num_inference_steps = 75 , guidance_scale = 9.0 , noise_level = 50 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , callback = None , callback_steps = 1 , tile_size = 1_28 , tile_border = 32 , original_image_slice = 32 , ):
        final_image = Image.new("""RGB""" , (image.size[0] * 4, image.size[1] * 4) )
        tcx = math.ceil(image.size[0] / tile_size )
        tcy = math.ceil(image.size[1] / tile_size )
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy ):
            for x in range(tcx ):
                self._process_tile(
                    original_image_slice , x , y , tile_size , tile_border , image , final_image , prompt=prompt , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , noise_level=noise_level , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , )
                current_count += 1
                if callback is not None:
                    callback({"""progress""": current_count / total_tile_count, """image""": final_image} )
        return final_image
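    # Tiles are visited row-major; every _process_tile call upscales one
    # tile_size x tile_size crop by 4x and alpha-pastes it into final_image,
    # so the returned image is exactly 4 * width by 4 * height.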
def main():
    """simple docstring"""
    model_id = """stabilityai/stable-diffusion-x4-upscaler"""
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id , revision="""fp16""" , torch_dtype=torch.float16 )
    pipe = pipe.to("""cuda""" )
    image = Image.open("""../../docs/source/imgs/diffusers_library.jpg""" )

    def callback(obj ):
        print(f"progress: {obj['progress']:.4f}" )
        obj["image"].save("""diffusers_library_progress.jpg""" )

    final_image = pipe(image=image , prompt="""Black font, white background, vector""" , noise_level=40 , callback=callback )
    final_image.save("""diffusers_library.jpg""" )
if __name__ == "__main__":
main()
| 70 | 0 |
'''simple docstring'''
import socket
def main():
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port) )
    sock.send(b'Hello server!' )
    with open('Received_file' , 'wb' ) as out_file:
        print('File opened' )
        print('Receiving data...' )
        while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data )
    print('Successfully received the file' )
    sock.close()
    print('Connection closed' )
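# Note: sock.recv(1024) returns b"" once the sender closes its side of the
# connection, which is what terminates the loop above; writing in binary mode
# lets arbitrary payloads round-trip unchanged.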
if __name__ == "__main__":
main()
| 370 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy( x ):
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
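# Derivation for the entropy above: with A = sum_j exp(x_j) and
# p_i = exp(x_i) / A, the Shannon entropy -sum_i p_i * log(p_i) expands to
# log(A) - (sum_i x_i * exp(x_i)) / A = log(A) - B / A.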
class DeeBertEncoder( nn.Module ):
    def __init__( self , config ):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config ) for _ in range(config.num_hidden_layers )] )
        self.highway = nn.ModuleList([BertHighway(config ) for _ in range(config.num_hidden_layers )] )
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers )]
    def set_early_exit_entropy( self , x ):
        if (type(x ) is float) or (type(x ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler( self , pooler ):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
    def forward( self , hidden_states , attention_mask=None , head_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states , attention_mask , head_mask[i] , encoder_hidden_states , encoder_attention_mask )
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs )
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits )
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output , i + 1 )
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
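    # Early-exit mechanics: at inference time, once a highway head's prediction
    # entropy falls below its per-layer threshold, the loop above raises
    # HighwayException carrying the logits and the 1-based exit layer, so the
    # remaining (deeper) layers are never executed.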
@add_start_docstrings(
    '''The Bert Model transformer with early exiting (DeeBERT). ''' , BERT_START_DOCSTRING , )
class DeeBertModel( BertPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.config = config
        self.embeddings = BertEmbeddings(config )
        self.encoder = DeeBertEncoder(config )
        self.pooler = BertPooler(config )
        self.init_weights()
    def init_highway_pooler( self ):
        self.encoder.init_highway_pooler(self.pooler )

    def get_input_embeddings( self ):
        return self.embeddings.word_embeddings

    def set_input_embeddings( self , value ):
        self.embeddings.word_embeddings = value

    def _prune_heads( self , heads_to_prune ):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=None , UpperCAmelCase__ : List[Any]=None , ) -> Optional[int]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
lowerCAmelCase = input_ids.size()
elif inputs_embeds is not None:
lowerCAmelCase = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
lowerCAmelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCAmelCase = torch.ones(UpperCAmelCase__ , device=UpperCAmelCase__ )
if encoder_attention_mask is None:
lowerCAmelCase = torch.ones(UpperCAmelCase__ , device=UpperCAmelCase__ )
if token_type_ids is None:
lowerCAmelCase = torch.zeros(UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCAmelCase = self.get_extended_attention_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
lowerCAmelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
lowerCAmelCase = encoder_attention_mask[:, None, None, :]
lowerCAmelCase = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
lowerCAmelCase = (1.0 - encoder_extended_attention_mask) * -10_000.0
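# With this additive mask, allowed positions (mask value 1.0) contribute 0.0,
# while masked positions contribute -10000.0, which drives their attention
# weights to ~0 after the softmax.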
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCAmelCase = self.get_head_mask(UpperCAmelCase__ , self.config.num_hidden_layers )
lowerCAmelCase = self.embeddings(
input_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ )
lowerCAmelCase = self.encoder(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
lowerCAmelCase = encoder_outputs[0]
lowerCAmelCase = self.pooler(UpperCAmelCase__ )
lowerCAmelCase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int ) -> Dict:
lowerCAmelCase = message
lowerCAmelCase = exit_layer # start from 1!
class UpperCAmelCase_ ( nn.Module ):
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> List[str]:
super().__init__()
lowerCAmelCase = BertPooler(UpperCAmelCase__ )
lowerCAmelCase = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase = nn.Linear(config.hidden_size , config.num_labels )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Dict ) -> Optional[int]:
# Pooler
lowerCAmelCase = encoder_outputs[0]
lowerCAmelCase = self.pooler(UpperCAmelCase__ )
# "return" pooler_output
# BertModel
lowerCAmelCase = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
lowerCAmelCase = bmodel_output[1]
lowerCAmelCase = self.dropout(UpperCAmelCase__ )
lowerCAmelCase = self.classifier(UpperCAmelCase__ )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , __lowercase , )
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : Dict , UpperCAmelCase__ : Dict ) -> Any:
super().__init__(UpperCAmelCase__ )
lowerCAmelCase = config.num_labels
lowerCAmelCase = config.num_hidden_layers
lowerCAmelCase = DeeBertModel(UpperCAmelCase__ )
lowerCAmelCase = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : str=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[Any]=-1 , UpperCAmelCase__ : Optional[Any]=False , ) -> Dict:
lowerCAmelCase = self.num_layers
try:
lowerCAmelCase = self.bert(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
lowerCAmelCase = outputs[1]
lowerCAmelCase = self.dropout(UpperCAmelCase__ )
lowerCAmelCase = self.classifier(UpperCAmelCase__ )
lowerCAmelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowerCAmelCase = e.message
lowerCAmelCase = e.exit_layer
lowerCAmelCase = outputs[0]
if not self.training:
lowerCAmelCase = entropy(UpperCAmelCase__ )
lowerCAmelCase = []
lowerCAmelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase = MSELoss()
lowerCAmelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase = CrossEntropyLoss()
lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
lowerCAmelCase = []
for highway_exit in outputs[-1]:
lowerCAmelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCAmelCase__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase = MSELoss()
lowerCAmelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase = CrossEntropyLoss()
lowerCAmelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCAmelCase__ )
if train_highway:
lowerCAmelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
lowerCAmelCase = (loss,) + outputs
if not self.training:
lowerCAmelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowerCAmelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
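# Training note (from the DeeBERT setup this code mirrors): the model is
# typically fine-tuned in two phases: first the backbone with the final-layer
# loss (train_highway=False), then the highway classifiers with the summed
# per-layer losses above (train_highway=True); the final highway is excluded
# from the sum since it duplicates the main classifier.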
| 55 | 0 |
from __future__ import annotations
import os
from typing import Any
import requests
lowerCAmelCase : Tuple = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowerCAmelCase : Union[str, Any] = BASE_URL + """/user"""
# https://github.com/settings/tokens
lowerCAmelCase : Dict = os.environ.get("""USER_TOKEN""", """""")
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple = {
"Authorization": f"token {auth_token}",
"Accept": "application/vnd.github.v3+json",
}
return requests.get(_UpperCAmelCase , headers=_UpperCAmelCase ).json()
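# Usage sketch (requires a real personal access token; the /user endpoint
# returns the authenticated user's profile as a dict):
#
#   user = fetch_github_info("ghp_your_token_here")
#   print(user["login"])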
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
| 13 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
UpperCamelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS}
UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def A ( snake_case :list[int] , snake_case :tuple[int, ...] ) -> str | None:
__UpperCamelCase = ""
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
for keychar, cipherchar in zip(cycle(snake_case ) , snake_case ):
__UpperCamelCase = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case )
return decoded
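# Quick illustration of the XOR round-trip `try_key` relies on: XOR-ing a
# byte with the same key byte twice recovers the original value.
#
#   plain = ord("h")               # 104
#   key = ord("k")                 # 107
#   cipher = plain ^ key           # 3
#   assert cipher ^ key == plain   # decryption is the same operation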
def A ( snake_case :list[int] ) -> list[str]:
__UpperCamelCase = []
for key in product(snake_case , repeat=3 ):
__UpperCamelCase = try_key(snake_case , snake_case )
if encoded is not None:
possibles.append(snake_case )
return possibles
def A ( snake_case :list[str] , snake_case :str ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def A ( snake_case :str = "p059_cipher.txt" ) -> int:
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = Path(snake_case ).parent.joinpath(snake_case ).read_text(encoding='utf-8' )
__UpperCamelCase = [int(snake_case ) for number in data.strip().split(',' )]
__UpperCamelCase = filter_valid_chars(snake_case )
for common_word in COMMON_WORDS:
__UpperCamelCase = filter_common_word(snake_case , snake_case )
if len(snake_case ) == 1:
break
__UpperCamelCase = possibles[0]
return sum(ord(snake_case ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 316 | 0 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
_SCREAMING_SNAKE_CASE : Dict = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
_SCREAMING_SNAKE_CASE : int = "sshleifer/student_marian_en_ro_6_1"
_SCREAMING_SNAKE_CASE : List[str] = "sshleifer/tiny-mbart"
@require_torch
class A__ ( snake_case__ ):
"""simple docstring"""
def a_ ( self , __snake_case=False , __snake_case=None , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , ):
snake_case = self.run_trainer(
eval_steps=1 , max_len=1_2 , model_name=__snake_case , num_train_epochs=1 , distributed=__snake_case , extra_args_str=__snake_case , predict_with_generate=__snake_case , do_train=__snake_case , do_eval=__snake_case , do_predict=__snake_case , )
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
snake_case = [log for log in logs if '''eval_loss''' in log.keys()]
snake_case = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
snake_case = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , __snake_case )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def a_ ( self ):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def a_ ( self ):
self.run_seqaseq_quick(distributed=__snake_case )
@require_torch_multi_gpu
def a_ ( self ):
self.run_seqaseq_quick(distributed=__snake_case )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def a_ ( self ):
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def a_ ( self ):
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def a_ ( self ):
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__snake_case )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def a_ ( self ):
self.run_seqaseq_quick(
distributed=__snake_case , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__snake_case )
@require_apex
@require_torch_gpu
def a_ ( self ):
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def a_ ( self , __snake_case ):
# as each sub-test is slow-ish, split into multiple sub-tests to avoid CI timeout
snake_case = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
snake_case = experiments[experiment_id]
snake_case = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
snake_case = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__snake_case , extra_args_str=data['''extra_args_str'''] )
snake_case = len(re.findall(__snake_case , cl.err ) )
self.assertEqual(__snake_case , data['''n_matches'''] )
@slow
def a_ ( self ):
snake_case = self.run_trainer(
eval_steps=2 , max_len=1_2_8 , model_name=__snake_case , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__snake_case , )
# Check metrics
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , '''trainer_state.json''' ) ).log_history
snake_case = [log for log in logs if '''eval_loss''' in log.keys()]
snake_case = eval_metrics[0]
snake_case = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , __snake_case )
# test if do_predict saves generations and metrics
snake_case = os.listdir(__snake_case )
snake_case = {os.path.basename(__snake_case ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def a_ ( self ):
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__snake_case ) -> Tuple[int, float]:
snake_case = '''--skip_memory_metrics 0'''
snake_case = self.run_trainer(
max_len=1_2_8 , model_name=__snake_case , learning_rate=3E-4 , num_train_epochs=1 , optim=__snake_case , distributed=__snake_case , extra_args_str=__snake_case , do_eval=__snake_case , do_predict=__snake_case , n_gpus_to_use=1 , )
# Check metrics
snake_case = TrainerState.load_from_json(Path(__snake_case , '''trainer_state.json''' ) ).log_history
snake_case = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 )
snake_case = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 )
snake_case = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
snake_case = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
snake_case = gpu_peak_mem_orig + gpu_alloc_mem_orig
snake_case = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
snake_case = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding` weights that
# don't get quantized and remain in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes, and the diff in optim memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate differences between GPUs, let's check
# that we have at least 120MB in savings
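# Worked check of the arithmetic above (illustrative only, not part of the
# test logic): 25e6 params * 8 bytes ~= 190 MiB for the fp32 Adam moments vs
# 25e6 * 2 bytes ~= 48 MiB for the 8-bit optimizer, so the expected saving is
# 25e6 * 6 / 2**20 ~= 143 MiB; hence the 120 MB margin below.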
snake_case = 1_2_0
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__snake_case , __snake_case , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
__snake_case , __snake_case , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
__snake_case , __snake_case , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case = 3E-3 , __snake_case = "adafactor" , __snake_case = False , __snake_case = None , __snake_case = 0 , __snake_case = True , __snake_case = True , __snake_case = True , __snake_case = True , __snake_case = None , ):
snake_case = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
snake_case = self.get_auto_remove_tmp_dir()
snake_case = F'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__snake_case )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__snake_case )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
snake_case = F'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__snake_case )}
'''.split()
snake_case = '''
--do_predict
'''.split()
snake_case = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
snake_case = get_gpu_count()
snake_case = get_torch_dist_unique_port()
snake_case = F'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
snake_case = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__snake_case , env=self.get_env() )
else:
snake_case = ['''run_translation.py'''] + args
with patch.object(__snake_case , '''argv''' , __snake_case ):
main()
return output_dir
| 213 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self , *__snake_case , **__snake_case ):
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 213 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__A =logging.get_logger(__name__)
class _snake_case ( a__ ):
def __init__( self , *_lowerCamelCase , **_lowerCamelCase):
warnings.warn(
"""The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use FlavaImageProcessor instead.""" , _lowerCamelCase , )
super().__init__(*_lowerCamelCase , **_lowerCamelCase)
| 163 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
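# Usage sketch for the scheduler wrapper defined below (a sketch assuming
# accelerate's original class name, `AcceleratedScheduler`; in practice
# `Accelerator.prepare` constructs this wrapper for you):
#
#   import torch
#   from accelerate.scheduler import AcceleratedScheduler
#   p = torch.nn.Parameter(torch.zeros(1))
#   opt = torch.optim.SGD([p], lr=0.1)
#   sched = torch.optim.lr_scheduler.StepLR(opt, step_size=10)
#   wrapped = AcceleratedScheduler(sched, opt, step_with_optimizer=True)
#   wrapped.step()  # steps the LR schedule only after real optimizer steps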
class _snake_case :
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = False):
UpperCAmelCase__ : str = scheduler
UpperCAmelCase__ : Dict = optimizers if isinstance(_lowerCamelCase , (list, tuple)) else [optimizers]
UpperCAmelCase__ : List[Any] = split_batches
UpperCAmelCase__ : Tuple = step_with_optimizer
UpperCAmelCase__ : Union[str, Any] = GradientState()
def snake_case__ ( self , *_lowerCamelCase , **_lowerCamelCase):
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase)
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase)
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
UpperCAmelCase__ : Dict = AcceleratorState().num_processes
for _ in range(_lowerCamelCase):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , """total_steps"""):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase)
else:
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase)
def snake_case__ ( self):
return self.scheduler.get_last_lr()
def snake_case__ ( self):
return self.scheduler.state_dict()
def snake_case__ ( self , _lowerCamelCase):
self.scheduler.load_state_dict(_lowerCamelCase)
def snake_case__ ( self):
return self.scheduler.get_lr()
def snake_case__ ( self , *_lowerCamelCase , **_lowerCamelCase):
return self.scheduler.print_lr(*_lowerCamelCase , **_lowerCamelCase)
| 163 | 1 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
a : List[str] = logging.get_logger(__name__)
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=None, __UpperCAmelCase=None ) -> Dict:
'''simple docstring'''
if "." in tensor_name:
snake_case_ = tensor_name.split('''.''' )
for split in splits[:-1]:
snake_case_ = getattr(__lowerCAmelCase, __lowerCAmelCase )
if new_module is None:
raise ValueError(F"{module} has no attribute {split}." )
snake_case_ = new_module
snake_case_ = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"{module} does not have a parameter or a buffer named {tensor_name}." )
snake_case_ = tensor_name in module._buffers
snake_case_ = getattr(__lowerCAmelCase, __lowerCAmelCase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(F"{tensor_name} is on the meta device, we need a `value` to put in on {device}." )
snake_case_ = False
snake_case_ = False
if is_buffer or not is_bitsandbytes_available():
snake_case_ = False
snake_case_ = False
else:
snake_case_ = hasattr(bnb.nn, '''Params4bit''' ) and isinstance(module._parameters[tensor_name], bnb.nn.Paramsabit )
snake_case_ = isinstance(module._parameters[tensor_name], bnb.nn.IntaParams )
if is_abit or is_abit:
snake_case_ = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
snake_case_ = old_value.to(__lowerCAmelCase )
elif isinstance(__lowerCAmelCase, torch.Tensor ):
snake_case_ = value.to('''cpu''' )
if value.dtype == torch.inta:
snake_case_ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
snake_case_ = torch.tensor(__lowerCAmelCase, device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls, __lowerCAmelCase ) and fpaa_statistics is None:
snake_case_ = new_value.T
snake_case_ = old_value.__dict__
if is_abit:
snake_case_ = bnb.nn.IntaParams(__lowerCAmelCase, requires_grad=__lowerCAmelCase, **__lowerCAmelCase ).to(__lowerCAmelCase )
elif is_abit:
snake_case_ = bnb.nn.Paramsabit(__lowerCAmelCase, requires_grad=__lowerCAmelCase, **__lowerCAmelCase ).to(__lowerCAmelCase )
snake_case_ = new_value
if fpaa_statistics is not None:
setattr(module.weight, '''SCB''', fpaa_statistics.to(__lowerCAmelCase ) )
else:
if value is None:
snake_case_ = old_value.to(__lowerCAmelCase )
elif isinstance(__lowerCAmelCase, torch.Tensor ):
snake_case_ = value.to(__lowerCAmelCase )
else:
snake_case_ = torch.tensor(__lowerCAmelCase, device=__lowerCAmelCase )
if is_buffer:
snake_case_ = new_value
else:
snake_case_ = nn.Parameter(__lowerCAmelCase, requires_grad=old_value.requires_grad )
snake_case_ = new_value
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase=None, __UpperCAmelCase=None, __UpperCAmelCase=None, __UpperCAmelCase=False ) -> Optional[int]:
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
snake_case_ = []
current_key_name.append(__lowerCAmelCase )
if (isinstance(__lowerCAmelCase, nn.Linear ) or isinstance(__lowerCAmelCase, __lowerCAmelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(__lowerCAmelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(__lowerCAmelCase, __lowerCAmelCase ):
snake_case_ = module.weight.shape
else:
snake_case_ = module.in_features
snake_case_ = module.out_features
if quantization_config.quantization_method() == "llm_int8":
snake_case_ = bnb.nn.LinearabitLt(
__lowerCAmelCase, __lowerCAmelCase, module.bias is not None, has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight, threshold=quantization_config.llm_inta_threshold, )
snake_case_ = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
snake_case_ = bnb.nn.Linearabit(
__lowerCAmelCase, __lowerCAmelCase, module.bias is not None, quantization_config.bnb_abit_compute_dtype, compress_statistics=quantization_config.bnb_abit_use_double_quant, quant_type=quantization_config.bnb_abit_quant_type, )
snake_case_ = True
# Store the module class in case we need to transpose the weight later
snake_case_ = type(__lowerCAmelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(__lowerCAmelCase )
if len(list(module.children() ) ) > 0:
snake_case_ = _replace_with_bnb_linear(
__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, has_been_replaced=__lowerCAmelCase, )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase=None, __UpperCAmelCase=None, __UpperCAmelCase=None ) -> str:
'''simple docstring'''
snake_case_ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
snake_case_ = _replace_with_bnb_linear(
__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def __magic_name__ ( *__UpperCAmelCase, **__UpperCAmelCase ) -> Tuple:
'''simple docstring'''
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''', __lowerCAmelCase, )
return replace_with_bnb_linear(*__lowerCAmelCase, **__lowerCAmelCase )
def __magic_name__ ( *__UpperCAmelCase, **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''', __lowerCAmelCase, )
return set_module_quantized_tensor_to_device(*__lowerCAmelCase, **__lowerCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
snake_case_ = deepcopy(__lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
snake_case_ = find_tied_parameters(__lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowerCAmelCase, __lowerCAmelCase ):
snake_case_ = sum(list(tied_params.values() ), [] ) + list(tied_params.keys() )
else:
snake_case_ = sum(__lowerCAmelCase, [] )
snake_case_ = len(__lowerCAmelCase ) > 0
# Check if it is a base model
snake_case_ = not hasattr(__lowerCAmelCase, model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
snake_case_ = list(model.named_children() )
snake_case_ = [list_modules[-1][0]]
# add last module together with tied weights
snake_case_ = set(__lowerCAmelCase ) - set(__lowerCAmelCase )
snake_case_ = list(set(__lowerCAmelCase ) ) + list(__lowerCAmelCase )
# remove ".weight" from the keys
snake_case_ = ['''.weight''', '''.bias''']
snake_case_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
snake_case_ = name.replace(__lowerCAmelCase, '''''' )
filtered_module_names.append(__lowerCAmelCase )
return filtered_module_names
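# Usage sketch for the replacement helper above (originally
# `replace_with_bnb_linear` in transformers; the model name is hypothetical,
# and the sketch assumes a BitsAndBytesConfig providing the attributes
# referenced in `_replace_with_bnb_linear`):
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#   config = BitsAndBytesConfig(load_in_8bit=True)
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   model = replace_with_bnb_linear(model, quantization_config=config)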
| 361 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Any:
'''simple docstring'''
_enforce_args(__UpperCAmelCase, __UpperCAmelCase )
if n == 0:
return 0
snake_case_ = float('''-inf''' )
for i in range(1, n + 1 ):
snake_case_ = max(
__UpperCAmelCase, prices[i - 1] + naive_cut_rod_recursive(n - i, __UpperCAmelCase ) )
return max_revue
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Any:
'''simple docstring'''
_enforce_args(__UpperCAmelCase, __UpperCAmelCase )
snake_case_ = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
snake_case_ = float('''-inf''' )
for i in range(1, n + 1 ):
snake_case_ = max(
__UpperCAmelCase, prices[i - 1] + _top_down_cut_rod_recursive(n - i, __UpperCAmelCase, __UpperCAmelCase ), )
snake_case_ = max_revenue
return max_rev[n]
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Any:
'''simple docstring'''
_enforce_args(__UpperCAmelCase, __UpperCAmelCase )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
snake_case_ = [float('''-inf''' ) for _ in range(n + 1 )]
snake_case_ = 0
for i in range(1, n + 1 ):
snake_case_ = max_rev[i]
for j in range(1, i + 1 ):
snake_case_ = max(__UpperCAmelCase, prices[j - 1] + max_rev[i - j] )
snake_case_ = max_revenue_i
return max_rev[n]
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> int:
'''simple docstring'''
if n < 0:
snake_case_ = F"n must be greater than or equal to 0. Got n = {n}"
raise ValueError(__UpperCAmelCase )
if n > len(__UpperCAmelCase ):
snake_case_ = (
'''Each integral piece of rod must have a corresponding price. '''
F"Got n = {n} but length of prices = {len(__UpperCAmelCase )}"
)
raise ValueError(__UpperCAmelCase )
def __magic_name__ ( ) -> Optional[int]:
'''simple docstring'''
snake_case_ = [6, 10, 12, 15, 20, 23]
snake_case_ = len(__UpperCAmelCase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
snake_case_ = 36
snake_case_ = top_down_cut_rod(__UpperCAmelCase, __UpperCAmelCase )
snake_case_ = bottom_up_cut_rod(__UpperCAmelCase, __UpperCAmelCase )
snake_case_ = naive_cut_rod_recursive(__UpperCAmelCase, __UpperCAmelCase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
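# Complexity note for the three strategies above: the naive recursion
# re-solves overlapping subproblems and is O(2^n), while both the memoized
# top-down and the bottom-up variants run in O(n^2) time with O(n) extra
# space for the revenue table.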
| 72 | 0 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class lowercase ( UpperCamelCase__):
def __init__( self : List[Any] , _lowerCamelCase : Optional[Any]=None , **_lowerCamelCase : str ):
"""simple docstring"""
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , lowerCAmelCase__ , )
super().__init__(args=lowerCAmelCase__ , **lowerCAmelCase__ )
| 167 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCAmelCase ( unittest.TestCase):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=None , ) -> Optional[int]:
'''simple docstring'''
a__ : str =size if size is not None else {"shortest_edge": 2_0}
a__ : Union[str, Any] =crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
a__ : Tuple =parent
a__ : Optional[int] =batch_size
a__ : Any =num_channels
a__ : List[str] =image_size
a__ : Dict =min_resolution
a__ : List[Any] =max_resolution
a__ : Dict =do_resize
a__ : Union[str, Any] =size
a__ : str =do_center_crop
a__ : List[str] =crop_size
def _lowercase ( self ) -> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase):
_lowercase : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] =MobileNetVaImageProcessingTester(self )
@property
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : List[str] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "crop_size" ) )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Any =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 2_0} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
a__ : Dict =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def _lowercase ( self ) -> Any:
'''simple docstring'''
pass
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Dict =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
a__ : List[Any] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : Dict =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : str =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
a__ : List[str] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : Union[str, Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Any =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ : int =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
a__ : Optional[Any] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : str =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
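# Note: the three input-type tests above assert the same output shape,
# (batch_size, num_channels, crop_height, crop_width), verifying that PIL,
# numpy and torch inputs all go through the identical resize + center-crop
# preprocessing path.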
| 95 | 0 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 164 |
'''simple docstring'''
def UpperCAmelCase ( a_ , a_ ) -> int:
"""simple docstring"""
return int((input_a, input_a).count(1 ) != 0 )
def UpperCAmelCase ( ) -> None:
"""simple docstring"""
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 164 | 1 |
from __future__ import annotations
UpperCAmelCase_ : int = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : list[list[int]] , __magic_name__ : list[int] , __magic_name__ : list[int] , __magic_name__ : int , __magic_name__ : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
UpperCamelCase :int = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__magic_name__ ) )
] # the reference grid
UpperCamelCase :Tuple = 1
UpperCamelCase :List[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__magic_name__ ) )
] # the action grid
UpperCamelCase :Dict = init[0]
UpperCamelCase :Any = init[1]
UpperCamelCase :int = 0
UpperCamelCase :str = g + heuristic[x][y] # estimated total cost f = g + h (cost so far plus heuristic to the goal)
UpperCamelCase :Union[str, Any] = [[f, g, x, y]]
UpperCamelCase :Any = False # flag that is set when search is complete
UpperCamelCase :Union[str, Any] = False # flag set if we can't find expand
while not found and not resign:
if len(__magic_name__ ) == 0:
raise ValueError("""Algorithm is unable to find a solution""" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
UpperCamelCase :Dict = cell.pop()
UpperCamelCase :Dict = next_cell[2]
UpperCamelCase :List[Any] = next_cell[3]
UpperCamelCase :List[str] = next_cell[1]
if x == goal[0] and y == goal[1]:
UpperCamelCase :List[Any] = True
else:
for i in range(len(__magic_name__ ) ): # to try out different valid actions
UpperCamelCase :Any = x + DIRECTIONS[i][0]
UpperCamelCase :Optional[Any] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__magic_name__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
UpperCamelCase :int = g + cost
UpperCamelCase :Union[str, Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
UpperCamelCase :List[Any] = 1
UpperCamelCase :Dict = i
UpperCamelCase :List[Any] = []
UpperCamelCase :Optional[Any] = goal[0]
UpperCamelCase :Optional[Any] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
UpperCamelCase :str = x - DIRECTIONS[action[x][y]][0]
UpperCamelCase :Optional[int] = y - DIRECTIONS[action[x][y]][1]
UpperCamelCase :int = xa
UpperCamelCase :Tuple = ya
invpath.append([x, y] )
UpperCamelCase :Optional[int] = []
for i in range(len(__magic_name__ ) ):
path.append(invpath[len(__magic_name__ ) - 1 - i] )
return path, action
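# Note: `search` is an A*-style best-first grid search: `cell` is re-sorted
# every iteration so the entry with the smallest f = g + heuristic is
# expanded next, and the `action` grid is then walked backwards from the
# goal to reconstruct the path.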
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCAmelCase_ : int = [0, 0]
# all coordinates are given in format [y,x]
UpperCAmelCase_ : int = [len(grid) - 1, len(grid[0]) - 1]
UpperCAmelCase_ : Optional[Any] = 1
# the cost map which pushes the path closer to the goal
UpperCAmelCase_ : List[Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCAmelCase_ : int = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCAmelCase_ : Union[str, Any] = 99
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 38 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : Any = """openai/whisper-base"""
snake_case__ : Optional[int] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
snake_case__ : Any = """transcriber"""
snake_case__ : Optional[int] = WhisperProcessor
snake_case__ : str = WhisperForConditionalGeneration
snake_case__ : Optional[Any] = ["""audio"""]
snake_case__ : Any = ["""text"""]
def _A ( self : str , __lowerCamelCase : Dict ):
return self.pre_processor(__lowerCamelCase , return_tensors="""pt""" ).input_features
def _A ( self : Dict , __lowerCamelCase : List[Any] ):
return self.model.generate(inputs=__lowerCamelCase )
def _A ( self : Any , __lowerCamelCase : Optional[Any] ):
return self.pre_processor.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )[0]
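# The three methods above implement the PipelineTool contract: `encode` turns
# raw audio into Whisper input features, `forward` runs generation, and
# `decode` maps the generated token ids back to text; calling the tool
# instance chains them in that order.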
| 38 | 1 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> str:
_UpperCAmelCase : Tuple = get_activation("""swish""" )
self.assertIsInstance(snake_case__ ,nn.SiLU )
self.assertEqual(act(torch.tensor(-100 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Any = get_activation("""silu""" )
self.assertIsInstance(snake_case__ ,nn.SiLU )
self.assertEqual(act(torch.tensor(-100 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
def _snake_case ( self ) -> str:
_UpperCAmelCase : List[str] = get_activation("""mish""" )
self.assertIsInstance(snake_case__ ,nn.Mish )
self.assertEqual(act(torch.tensor(-200 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Dict = get_activation("""gelu""" )
self.assertIsInstance(snake_case__ ,nn.GELU )
self.assertEqual(act(torch.tensor(-100 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
| 362 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def snake_case_ ( )-> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
_UpperCAmelCase : str = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(lowerCAmelCase_ )
# Let's go
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
if not hasattr(lowerCAmelCase_ , """func""" ):
parser.print_help()
exit(1 )
# Run
_UpperCAmelCase : Optional[int] = args.func(lowerCAmelCase_ )
service.run()
if __name__ == "__main__":
main()
| 349 | 0 |
import os
import numpy
import onnx
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__UpperCAmelCase = a.name
__UpperCAmelCase = b.name
__UpperCAmelCase = ''''''
__UpperCAmelCase = ''''''
__UpperCAmelCase = a == b
__UpperCAmelCase = name_a
__UpperCAmelCase = name_b
return res
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_graph_replace_input_with(node_proto.attribute[1].g , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
for n in graph_proto.node:
_node_replace_input_with(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = list(model.graph.initializer )
__UpperCAmelCase = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
__UpperCAmelCase = inits[i].name
__UpperCAmelCase = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__UpperCAmelCase = os.path.dirname(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = os.path.basename(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = onnx.load(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase = list(model.graph.initializer )
__UpperCAmelCase = set()
__UpperCAmelCase = {}
__UpperCAmelCase = []
__UpperCAmelCase = 0
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(SCREAMING_SNAKE_CASE ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(SCREAMING_SNAKE_CASE )
dup_set.add(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = inits[j].data_type
__UpperCAmelCase = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 1_1:
mem_size *= 8
else:
print('''unexpected data type: ''' , SCREAMING_SNAKE_CASE )
total_reduced_size += mem_size
__UpperCAmelCase = inits[i].name
__UpperCAmelCase = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(SCREAMING_SNAKE_CASE )
else:
__UpperCAmelCase = [name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , '''GB''' )
__UpperCAmelCase = sorted(SCREAMING_SNAKE_CASE )
_remove_dup_initializers_from_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCAmelCase = '''optimized_''' + model_file_name
__UpperCAmelCase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
onnx.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return new_model
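# Minimal usage sketch (hypothetical path; the helper, originally
# `remove_dup_initializers`, loads the model, drops duplicated initializer
# tensors, rewires references to the kept copies, and saves an `optimized_`
# prefixed file next to the original, returning the new path):
#
#   new_path = remove_dup_initializers("exported/model.onnx")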
| 333 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
A_ : str = logging.get_logger(__name__)
A_ : str = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
A_ : Optional[int] = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
A_ : Union[str, Any] = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
A_ : Dict = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
A_ : Optional[int] = OrderedDict(
[
# Model for Image-classification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
A_ : Dict = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
A_ : List[str] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
A_ : Tuple = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
A_ : Optional[int] = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
A_ : int = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
A_ : Tuple = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
A_ : Tuple = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
A_ : int = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
A_ : Tuple = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
A_ : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
A_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
A_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
A_ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
A_ : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
A_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
A_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
A_ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
A_ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
A_ : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
A_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
A_ : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
A_ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
A_ : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_MAPPING
A_ : Tuple = auto_class_update(FlaxAutoModel)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING
A_ : str = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
A_ : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING
A_ : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
A_ : Union[str, Any] = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
A_ : Tuple = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
A_ : Any = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
A_ : Dict = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
A_ : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
A_ : Tuple = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
A_ : int = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
A_ : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
A_ : Optional[int] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 333 | 1 |
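A minimal usage sketch for the auto-class machinery defined above: each FlaxAutoModelFor* class resolves a checkpoint's config type through its _LazyAutoMapping to the concrete Flax class. This assumes transformers with Flax/JAX support is installed and the checkpoint (an illustrative public one) ships Flax weights; otherwise pass from_pt=True.

from transformers import FlaxAutoModelForSequenceClassification

# "distilbert" appears in FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
# so the auto class dispatches to FlaxDistilBertForSequenceClassification.
model = FlaxAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")
print(type(model).__name__)  # FlaxDistilBertForSequenceClassification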
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
UpperCAmelCase : int = 'laion/clap-htsat-unfused'
UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
def UpperCAmelCase_ ( self : Dict , **lowercase_ : str ) -> Dict:
return RobertaTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
def UpperCAmelCase_ ( self : Tuple , **lowercase_ : Union[str, Any] ) -> List[Any]:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self : int ) -> List[str]:
UpperCAmelCase : Dict = self.get_tokenizer()
UpperCAmelCase : List[Any] = self.get_feature_extractor()
UpperCAmelCase : List[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase : Dict = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowercase_ )
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
UpperCAmelCase : Tuple = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCAmelCase : int = self.get_feature_extractor(do_normalize=lowercase_ , padding_value=1.0 )
UpperCAmelCase : Dict = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
UpperCAmelCase : List[str] = self.get_feature_extractor()
UpperCAmelCase : str = self.get_tokenizer()
UpperCAmelCase : List[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
UpperCAmelCase : List[str] = floats_list((3, 1_000) )
UpperCAmelCase : List[str] = feature_extractor(lowercase_ , return_tensors='np' )
UpperCAmelCase : Optional[Any] = processor(audios=lowercase_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
UpperCAmelCase : Union[str, Any] = self.get_feature_extractor()
UpperCAmelCase : int = self.get_tokenizer()
UpperCAmelCase : Tuple = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
UpperCAmelCase : Tuple = 'This is a test string'
UpperCAmelCase : List[Any] = processor(text=lowercase_ )
UpperCAmelCase : Union[str, Any] = tokenizer(lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
UpperCAmelCase : Dict = self.get_feature_extractor()
UpperCAmelCase : Dict = self.get_tokenizer()
UpperCAmelCase : Tuple = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
UpperCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase : Optional[Any] = processor.batch_decode(lowercase_ )
UpperCAmelCase : List[Any] = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
UpperCAmelCase : Tuple = self.get_feature_extractor()
UpperCAmelCase : List[Any] = self.get_tokenizer()
UpperCAmelCase : str = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 280 |
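A hedged end-to-end sketch of the processor exercised by the tests above: ClapProcessor bundles a RobertaTokenizer(Fast) and a ClapFeatureExtractor behind one __call__. It assumes transformers and PyTorch are installed and the checkpoint (the same one used in setUp) is reachable; the audio input is a placeholder.

from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
inputs = processor(
    text=["the sound of a dog barking"],
    audios=[[0.0] * 48_000],  # one second of silence at the extractor's 48 kHz default
    return_tensors="pt",
)
# Tokenizer fields and feature-extractor fields arrive in a single BatchEncoding.
print(sorted(inputs.keys()))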
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
set_seed(770)
lowercase__ = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
lowercase__ = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
lowercase__ = os.path.dirname(os.path.abspath(__file__))
lowercase__ = os.path.join(os.path.expanduser("~"), ".cache")
lowercase__ = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_=False ):
UpperCAmelCase : List[str] = model_type
if use_small:
key += "_small"
return os.path.join(UpperCAmelCase_ , REMOTE_MODEL_PATHS[key]['file_name'] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
hf_hub_download(repo_id=UpperCAmelCase_ , filename=UpperCAmelCase_ , local_dir=UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=False , UpperCAmelCase_="text" ):
if model_type == "text":
UpperCAmelCase : Dict = BarkSemanticModel
UpperCAmelCase : List[Any] = BarkSemanticConfig
UpperCAmelCase : Optional[int] = BarkSemanticGenerationConfig
elif model_type == "coarse":
UpperCAmelCase : List[str] = BarkCoarseModel
UpperCAmelCase : Dict = BarkCoarseConfig
UpperCAmelCase : int = BarkCoarseGenerationConfig
elif model_type == "fine":
UpperCAmelCase : List[Any] = BarkFineModel
UpperCAmelCase : Optional[Any] = BarkFineConfig
UpperCAmelCase : Dict = BarkFineGenerationConfig
else:
raise NotImplementedError()
UpperCAmelCase : Optional[int] = F"""{model_type}_small""" if use_small else model_type
UpperCAmelCase : Tuple = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(UpperCAmelCase_ ):
logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info['repo_id'] , model_info['file_name'] )
UpperCAmelCase : Optional[Any] = torch.load(UpperCAmelCase_ , map_location=UpperCAmelCase_ )
# this is a hack
UpperCAmelCase : str = checkpoint['model_args']
if "input_vocab_size" not in model_args:
UpperCAmelCase : Union[str, Any] = model_args['vocab_size']
UpperCAmelCase : Union[str, Any] = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
UpperCAmelCase : Any = model_args.pop('n_head' )
UpperCAmelCase : Optional[Any] = model_args.pop('n_embd' )
UpperCAmelCase : Union[str, Any] = model_args.pop('n_layer' )
UpperCAmelCase : List[Any] = ConfigClass(**checkpoint['model_args'] )
UpperCAmelCase : List[str] = ModelClass(config=UpperCAmelCase_ )
UpperCAmelCase : List[str] = GenerationConfigClass()
UpperCAmelCase : Dict = model_generation_config
UpperCAmelCase : int = checkpoint['model']
# fixup checkpoint
UpperCAmelCase : Tuple = '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(UpperCAmelCase_ ):
# replace part of the key with corresponding layer name in HF implementation
UpperCAmelCase : str = k[len(UpperCAmelCase_ ) :]
for old_layer_name in new_layer_name_dict:
UpperCAmelCase : List[Any] = new_k.replace(UpperCAmelCase_ , new_layer_name_dict[old_layer_name] )
UpperCAmelCase : List[Any] = state_dict.pop(UpperCAmelCase_ )
UpperCAmelCase : Tuple = set(state_dict.keys() ) - set(model.state_dict().keys() )
UpperCAmelCase : Optional[int] = {k for k in extra_keys if not k.endswith('.attn.bias' )}
UpperCAmelCase : str = set(model.state_dict().keys() ) - set(state_dict.keys() )
UpperCAmelCase : str = {k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(UpperCAmelCase_ ) != 0:
raise ValueError(F"""extra keys found: {extra_keys}""" )
if len(UpperCAmelCase_ ) != 0:
raise ValueError(F"""missing keys: {missing_keys}""" )
model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
UpperCAmelCase : List[str] = model.num_parameters(exclude_embeddings=UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = checkpoint['best_val_loss'].item()
logger.info(F"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(UpperCAmelCase_ , 3 )} loss""" )
model.eval()
model.to(UpperCAmelCase_ )
del checkpoint, state_dict
return model
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_=False , UpperCAmelCase_="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
UpperCAmelCase : List[str] = 'cpu' # do conversion on cpu
UpperCAmelCase : List[str] = _get_ckpt_path(UpperCAmelCase_ , use_small=UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = _load_model(UpperCAmelCase_ , UpperCAmelCase_ , model_type=UpperCAmelCase_ , use_small=UpperCAmelCase_ )
# load bark initial model
UpperCAmelCase : List[str] = _bark_load_model(UpperCAmelCase_ , 'cpu' , model_type=UpperCAmelCase_ , use_small=UpperCAmelCase_ )
if model_type == "text":
UpperCAmelCase : Tuple = bark_model['model']
if model.num_parameters(exclude_embeddings=UpperCAmelCase_ ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
UpperCAmelCase : Optional[int] = 5
UpperCAmelCase : Optional[int] = 10
if model_type in ["text", "coarse"]:
UpperCAmelCase : List[Any] = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int )
UpperCAmelCase : Optional[Any] = bark_model(UpperCAmelCase_ )[0]
UpperCAmelCase : List[str] = model(UpperCAmelCase_ )
# take last logits
UpperCAmelCase : str = output_new_model_total.logits[:, [-1], :]
else:
UpperCAmelCase : Optional[int] = 3
UpperCAmelCase : List[Any] = 8
UpperCAmelCase : Dict = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
UpperCAmelCase : str = model(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : Dict = bark_model(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : Any = output_new_model_total.logits
    # Any output difference should come from differences in the self-attention implementation
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError('initial and new outputs are not equal' )
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
model.save_pretrained(UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
UpperCAmelCase : int = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : Dict = BarkSemanticConfig.from_pretrained(os.path.join(UpperCAmelCase_ , 'config.json' ) )
UpperCAmelCase : Any = BarkCoarseConfig.from_pretrained(os.path.join(UpperCAmelCase_ , 'config.json' ) )
UpperCAmelCase : Union[str, Any] = BarkFineConfig.from_pretrained(os.path.join(UpperCAmelCase_ , 'config.json' ) )
UpperCAmelCase : Any = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
UpperCAmelCase : Dict = BarkSemanticModel.from_pretrained(UpperCAmelCase_ )
UpperCAmelCase : Tuple = BarkCoarseModel.from_pretrained(UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = BarkFineModel.from_pretrained(UpperCAmelCase_ )
UpperCAmelCase : str = EncodecModel.from_pretrained('facebook/encodec_24khz' )
UpperCAmelCase : Optional[Any] = BarkConfig.from_sub_model_configs(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
UpperCAmelCase : str = BarkModel(UpperCAmelCase_ )
UpperCAmelCase : int = semantic
UpperCAmelCase : Tuple = coarseAcoustic
UpperCAmelCase : Union[str, Any] = fineAcoustic
UpperCAmelCase : Union[str, Any] = codec
UpperCAmelCase : Optional[int] = bark_generation_config
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
bark.save_pretrained(UpperCAmelCase_ , repo_id=UpperCAmelCase_ , push_to_hub=UpperCAmelCase_ )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
lowercase__ = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 280 | 1 |
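The heart of the conversion above is the state-dict fixup: strip the torch.compile "_orig_mod." prefix and translate GPT-style parameter names to the HF Bark naming scheme. Below is a self-contained sketch of that step with a toy state dict (the tensor value is a placeholder):

new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
}

state_dict = {"_orig_mod.transformer.h.0.ln_1.weight": 1.0}

unwanted_prefix = "_orig_mod."
for k in list(state_dict):
    if k.startswith(unwanted_prefix):
        # Drop the compile-time prefix, then rewrite each GPT-style fragment.
        new_k = k[len(unwanted_prefix):]
        for old_name, new_name in new_layer_name_dict.items():
            new_k = new_k.replace(old_name, new_name)
        state_dict[new_k] = state_dict.pop(k)

print(state_dict)  # {'layers.0.layernorm_1.weight': 1.0}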
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ):
A : Optional[Any] = 'levit'
def __init__( self , SCREAMING_SNAKE_CASE__=224 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[128, 256, 384] , SCREAMING_SNAKE_CASE__=[4, 8, 12] , SCREAMING_SNAKE_CASE__=[4, 4, 4] , SCREAMING_SNAKE_CASE__=[16, 16, 16] , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=[2, 2, 2] , SCREAMING_SNAKE_CASE__=[2, 2, 2] , SCREAMING_SNAKE_CASE__=0.02 , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**_a )
lowercase : List[Any] = image_size
lowercase : Union[str, Any] = num_channels
lowercase : Optional[Any] = kernel_size
lowercase : Optional[int] = stride
lowercase : int = padding
lowercase : Optional[int] = hidden_sizes
lowercase : List[str] = num_attention_heads
lowercase : Tuple = depths
lowercase : Any = key_dim
lowercase : Optional[Any] = drop_path_rate
lowercase : Tuple = patch_size
lowercase : Tuple = attention_ratio
lowercase : int = mlp_ratio
lowercase : Any = initializer_range
lowercase : Tuple = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ):
A : Dict = version.parse('1.11' )
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4
| 337 |
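An illustrative instantiation of the configuration class defined above; this assumes transformers with PyTorch is installed, and the values shown simply repeat the defaults from __init__.

from transformers import LevitConfig, LevitModel

config = LevitConfig(image_size=224, hidden_sizes=[128, 256, 384], depths=[4, 4, 4])
model = LevitModel(config)  # randomly initialised LeViT backbone
print(config.model_type)    # levit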
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_enforce_args(snake_case_,snake_case_ )
if n == 0:
return 0
_A : Tuple = float("""-inf""" )
for i in range(1,n + 1 ):
_A : str = max(
snake_case_,prices[i - 1] + naive_cut_rod_recursive(n - i,snake_case_ ) )
    return max_revenue
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_enforce_args(snake_case_,snake_case_ )
_A : Dict = [float("""-inf""" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(snake_case_,snake_case_,snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_A : List[str] = float("""-inf""" )
for i in range(1,n + 1 ):
_A : Optional[Any] = max(
snake_case_,prices[i - 1] + _top_down_cut_rod_recursive(n - i,snake_case_,snake_case_ ),)
_A : Tuple = max_revenue
return max_rev[n]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_enforce_args(snake_case_,snake_case_ )
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
    # length 0.
_A : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )]
_A : Any = 0
for i in range(1,n + 1 ):
_A : Optional[Any] = max_rev[i]
for j in range(1,i + 1 ):
_A : int = max(snake_case_,prices[j - 1] + max_rev[i - j] )
_A : int = max_revenue_i
return max_rev[n]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if n < 0:
_A : Optional[Any] = f'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(snake_case_ )
if n > len(snake_case_ ):
_A : Any = (
"""Each integral piece of rod must have a corresponding price. """
f'''Got n = {n} but length of prices = {len(snake_case_ )}'''
)
raise ValueError(snake_case_ )
def lowerCAmelCase_ ( ):
_A : Tuple = [6, 10, 12, 15, 20, 23]
_A : List[Any] = len(snake_case_ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
_A : Any = 36
_A : List[Any] = top_down_cut_rod(snake_case_,snake_case_ )
_A : List[Any] = bottom_up_cut_rod(snake_case_,snake_case_ )
_A : Dict = naive_cut_rod_recursive(snake_case_,snake_case_ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 26 | 0 |
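A compact reference version of the bottom-up routine above, using the same recurrence max_rev[i] = max over 1 <= j <= i of prices[j-1] + max_rev[i-j], checked against the same worked example (revenue 36 for a rod of length 6):

def bottom_up_cut_rod(prices, n):
    # max_rev[i] holds the best revenue for a rod of length i; length 0 earns 0.
    max_rev = [0] * (n + 1)
    for i in range(1, n + 1):
        max_rev[i] = max(prices[j - 1] + max_rev[i - j] for j in range(1, i + 1))
    return max_rev[n]

assert bottom_up_cut_rod([6, 10, 12, 15, 20, 23], 6) == 36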
from __future__ import annotations
from math import gcd
def __lowercase ( a__ , a__ = 2 , a__ = 1 , a__ = 3 , ) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(a__ , a__ , a__ ) -> int:
return (pow(a__ , 2 ) + step) % modulus
for _ in range(a__ ):
# These track the position within the cycle detection logic.
__SCREAMING_SNAKE_CASE = seed
__SCREAMING_SNAKE_CASE = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
__SCREAMING_SNAKE_CASE = rand_fn(a__ , a__ , a__ )
__SCREAMING_SNAKE_CASE = rand_fn(a__ , a__ , a__ )
__SCREAMING_SNAKE_CASE = rand_fn(a__ , a__ , a__ )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
__SCREAMING_SNAKE_CASE = gcd(hare - tortoise , a__ )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
__SCREAMING_SNAKE_CASE = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
lowerCAmelCase__ : Tuple =argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
lowerCAmelCase__ : List[Any] =parser.parse_args()
lowerCAmelCase__ : Union[str, Any] =pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
lowerCAmelCase__ : Optional[int] =args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
| 367 |
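A quick, hedged check of the routine above, assuming it is importable as pollard_rho (the name used in the __main__ block): 8051 = 83 * 97 is the classic worked example, a prime input yields None after the allotted attempts, and even inputs short-circuit to 2.

assert pollard_rho(8_051) in (83, 97)  # classic composite example
assert pollard_rho(127) is None        # prime: no nontrivial divisor exists
assert pollard_rho(100) == 2           # even inputs are handled up front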
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : List[str] = CycleDiffusionPipeline
UpperCamelCase__ : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
UpperCamelCase__ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
UpperCamelCase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
UpperCamelCase__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _A ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , num_train_timesteps=1_000 , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__SCREAMING_SNAKE_CASE = CLIPTextModel(_A )
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _A ( self , _A , _A=0 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__SCREAMING_SNAKE_CASE = image / 2 + 0.5
if str(_A ).startswith('mps' ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(_A )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=_A ).manual_seed(_A )
__SCREAMING_SNAKE_CASE = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = CycleDiffusionPipeline(**_A )
__SCREAMING_SNAKE_CASE = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(_A )
__SCREAMING_SNAKE_CASE = pipe(**_A )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE = np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
for name, module in components.items():
if hasattr(_A , 'half' ):
__SCREAMING_SNAKE_CASE = module.half()
__SCREAMING_SNAKE_CASE = CycleDiffusionPipeline(**_A )
__SCREAMING_SNAKE_CASE = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(_A )
__SCREAMING_SNAKE_CASE = pipe(**_A )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE = np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def _A ( self ):
'''simple docstring'''
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def _A ( self ):
'''simple docstring'''
return super().test_inference_batch_single_identical()
@skip_mps
def _A ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _A ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def _A ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _A ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
__SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
__SCREAMING_SNAKE_CASE = init_image.resize((512, 512) )
__SCREAMING_SNAKE_CASE = 'CompVis/stable-diffusion-v1-4'
__SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(_A , subfolder='scheduler' )
__SCREAMING_SNAKE_CASE = CycleDiffusionPipeline.from_pretrained(
_A , scheduler=_A , safety_checker=_A , torch_dtype=torch.floataa , revision='fp16' )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = 'A black colored car'
__SCREAMING_SNAKE_CASE = 'A blue colored car'
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=_A , source_prompt=_A , image=_A , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=_A , output_type='np' , )
__SCREAMING_SNAKE_CASE = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
__SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
__SCREAMING_SNAKE_CASE = init_image.resize((512, 512) )
__SCREAMING_SNAKE_CASE = 'CompVis/stable-diffusion-v1-4'
__SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(_A , subfolder='scheduler' )
__SCREAMING_SNAKE_CASE = CycleDiffusionPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = 'A black colored car'
__SCREAMING_SNAKE_CASE = 'A blue colored car'
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=_A , source_prompt=_A , image=_A , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=_A , output_type='np' , )
__SCREAMING_SNAKE_CASE = output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 118 | 0 |
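A condensed, hedged sketch of the slow test above as a standalone script; it needs a CUDA GPU, diffusers, and network access, and reuses the checkpoint, source image, and prompts from the test itself.

import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images"
    "/resolve/main/cycle-diffusion/black_colored_car.png"
).resize((512, 512))

scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", scheduler=scheduler, safety_checker=None
).to("cuda")

# CycleDiffusion edits the source image toward `prompt` while anchoring on
# `source_prompt`; `strength` controls how much of the image is re-noised.
image = pipe(
    prompt="A blue colored car",
    source_prompt="A black colored car",
    image=init_image,
    num_inference_steps=100,
    strength=0.85,
    guidance_scale=3,
    source_guidance_scale=1,
    eta=0.1,
    generator=torch.manual_seed(0),
).images[0]
image.save("blue_colored_car.png")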