code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
from __future__ import annotations
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
if len(snake_case__ ) < k or k < 0:
raise ValueError("Invalid Input" )
SCREAMING_SNAKE_CASE_: List[Any] = sum(array[:k] )
for i in range(len(snake_case__ ) - k ):
SCREAMING_SNAKE_CASE_: Optional[Any] = current_sum - array[i] + array[i + k]
SCREAMING_SNAKE_CASE_: Optional[int] = max(snake_case__ , snake_case__ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowerCAmelCase : Tuple = [randint(-1000, 1000) for i in range(100)]
lowerCAmelCase : Optional[int] = randint(0, 110)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 13 | import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = (DEISMultistepScheduler,)
UpperCAmelCase_ : int = (("""num_inference_steps""", 25),)
def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->str:
lowerCAmelCase = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase , lowerCAmelCase = sample, sample
for t in range(__SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
if scheduler is None:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
return sample
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
lowerCAmelCase = scheduler.timesteps[5]
lowerCAmelCase = scheduler.timesteps[6]
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowerCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() )
lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , algorithm_type='''deis''' , solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = self.full_loop(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
assert not torch.isnan(__SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE , time_step=0 )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = self.full_loop()
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
lowerCAmelCase = self.full_loop(prediction_type='''v_prediction''' )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
assert sample.dtype == torch.floataa
| 338 | 0 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
lowerCamelCase : Dict = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def _lowerCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Github(os.environ['GITHUB_TOKEN'] )
_SCREAMING_SNAKE_CASE =g.get_repo('huggingface/transformers' )
_SCREAMING_SNAKE_CASE =repo.get_issues(state='open' )
for issue in open_issues:
_SCREAMING_SNAKE_CASE =sorted([comment for comment in issue.get_comments()] , key=lambda _UpperCamelCase : i.created_at , reverse=_UpperCamelCase )
_SCREAMING_SNAKE_CASE =comments[0] if len(_UpperCamelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 114 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class A__ ( A__ ):
A__ = 'MCTCTFeatureExtractor'
A__ = 'AutoTokenizer'
def __init__( self : Optional[Any] , _a : Optional[int] , _a : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_a , _a )
_SCREAMING_SNAKE_CASE =self.feature_extractor
_SCREAMING_SNAKE_CASE =False
def __call__( self : Dict , *_a : str , **_a : Dict ) -> Dict:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*_a , **_a )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
_SCREAMING_SNAKE_CASE =kwargs.pop('raw_speech' )
else:
_SCREAMING_SNAKE_CASE =kwargs.pop('audio' , _a )
_SCREAMING_SNAKE_CASE =kwargs.pop('sampling_rate' , _a )
_SCREAMING_SNAKE_CASE =kwargs.pop('text' , _a )
if len(_a ) > 0:
_SCREAMING_SNAKE_CASE =args[0]
_SCREAMING_SNAKE_CASE =args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
_SCREAMING_SNAKE_CASE =self.feature_extractor(_a , *_a , sampling_rate=_a , **_a )
if text is not None:
_SCREAMING_SNAKE_CASE =self.tokenizer(_a , **_a )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_SCREAMING_SNAKE_CASE =encodings['input_ids']
return inputs
def A ( self : Any , *_a : List[str] , **_a : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*_a , **_a )
def A ( self : Dict , *_a : Tuple , **_a : Dict ) -> List[str]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor.pad(*_a , **_a )
_SCREAMING_SNAKE_CASE =kwargs.pop('input_features' , _a )
_SCREAMING_SNAKE_CASE =kwargs.pop('labels' , _a )
if len(_a ) > 0:
_SCREAMING_SNAKE_CASE =args[0]
_SCREAMING_SNAKE_CASE =args[1:]
if input_features is not None:
_SCREAMING_SNAKE_CASE =self.feature_extractor.pad(_a , *_a , **_a )
if labels is not None:
_SCREAMING_SNAKE_CASE =self.tokenizer.pad(_a , **_a )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_SCREAMING_SNAKE_CASE =labels['input_ids']
return input_features
def A ( self : Tuple , *_a : Dict , **_a : List[Any] ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*_a , **_a )
@contextmanager
def A ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =self.tokenizer
yield
_SCREAMING_SNAKE_CASE =self.feature_extractor
_SCREAMING_SNAKE_CASE =False
| 114 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A( a ):
snake_case_ = ['''image_processor''', '''tokenizer''']
snake_case_ = '''BlipImageProcessor'''
snake_case_ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , _snake_case , _snake_case ) -> Dict:
'''simple docstring'''
__a = False
super().__init__(_snake_case , _snake_case )
__a = self.image_processor
def __call__( self , _snake_case = None , _snake_case = None , _snake_case = True , _snake_case = False , _snake_case = None , _snake_case = None , _snake_case = 0 , _snake_case = None , _snake_case = None , _snake_case = False , _snake_case = False , _snake_case = False , _snake_case = False , _snake_case = False , _snake_case = True , _snake_case = None , **_snake_case , ) -> BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
__a = self.tokenizer
__a = self.tokenizer(
text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_token_type_ids=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , )
return text_encoding
# add pixel_values
__a = self.image_processor(_snake_case , return_tensors=_snake_case )
if text is not None:
__a = self.tokenizer(
text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_token_type_ids=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , )
else:
__a = None
if text_encoding is not None:
encoding_image_processor.update(_snake_case )
return encoding_image_processor
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = self.tokenizer.model_input_names
__a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 6 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __A( a , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __A( unittest.TestCase ):
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = ort.SessionOptions()
__a = False
return options
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__a = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_snake_case )
__a = '''A red cat sitting on a park bench'''
__a = np.random.RandomState(0 )
__a = pipe(
prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=10 , generator=_snake_case , output_type='''np''' , )
__a = output.images
__a = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__a = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__a = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__a = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_snake_case , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_snake_case )
__a = '''A red cat sitting on a park bench'''
__a = np.random.RandomState(0 )
__a = pipe(
prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=20 , generator=_snake_case , output_type='''np''' , )
__a = output.images
__a = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__a = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 | 6 | 1 |
'''simple docstring'''
def a_ ( __snake_case : list , __snake_case : list , __snake_case : int ) -> list:
"""simple docstring"""
lowerCamelCase_ =len(__snake_case )
lowerCamelCase_ =[[0] * n for i in range(__snake_case )]
for i in range(__snake_case ):
lowerCamelCase_ =y_points[i]
for i in range(2 , __snake_case ):
for j in range(__snake_case , __snake_case ):
lowerCamelCase_ =(
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Any = logging.get_logger(__name__)
a_ : Optional[int] = """https://openaipublic.azureedge.net/jukebox/models/"""
a_ : Any = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def a_ ( __snake_case : int ) -> Any:
"""simple docstring"""
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
lowerCamelCase_ =key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
lowerCamelCase_ =key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowerCamelCase_ =key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
lowerCamelCase_ =key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def a_ ( __snake_case : Dict , __snake_case : int , __snake_case : Dict , __snake_case : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ ={}
import re
lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_conv_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_encoder_block_conv_in.sub(__snake_case , __snake_case )
elif re_encoder_block_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_encoder_block_resnet.sub(__snake_case , __snake_case )
elif re_encoder_block_proj_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_proj_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
lowerCamelCase_ =re_encoder_block_proj_out.sub(__snake_case , __snake_case )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_conv_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_decoder_block_conv_out.sub(__snake_case , __snake_case )
elif re_decoder_block_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_decoder_block_resnet.sub(__snake_case , __snake_case )
elif re_decoder_block_proj_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_proj_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
lowerCamelCase_ =re_decoder_block_proj_in.sub(__snake_case , __snake_case )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_conv_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_prior_cond_conv_out.sub(__snake_case , __snake_case )
elif re_prior_cond_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_prior_cond_resnet.sub(__snake_case , __snake_case )
elif re_prior_cond_proj_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_proj_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
lowerCamelCase_ =re_prior_cond_proj_in.sub(__snake_case , __snake_case )
# keep original key
else:
lowerCamelCase_ =original_key
lowerCamelCase_ =replace_key(__snake_case )
if F'''{key_prefix}.{key}''' not in model_state_dict or key is None:
print(F'''failed converting {original_key} to {key}, does not match''' )
# handle missmatched shape
elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape:
lowerCamelCase_ =model_state_dict[F'''{key_prefix}.{key}''']
print(F'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' )
lowerCamelCase_ =original_key
lowerCamelCase_ =original_key
lowerCamelCase_ =value
return new_dict
@torch.no_grad()
def a_ ( __snake_case : List[str]=None , __snake_case : Tuple=None ) -> Union[str, Any]:
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ):
lowerCamelCase_ =requests.get(F'''{PREFIX}{file}''' , allow_redirects=__snake_case )
os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=__snake_case )
open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , '''wb''' ).write(r.content )
lowerCamelCase_ =MODEL_MAPPING[model_name.split('''/''' )[-1]]
lowerCamelCase_ =JukeboxConfig.from_pretrained(__snake_case )
lowerCamelCase_ =JukeboxModel(__snake_case )
lowerCamelCase_ =[]
lowerCamelCase_ ={}
for i, dict_name in enumerate(__snake_case ):
lowerCamelCase_ =torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['''model''']
lowerCamelCase_ ={}
for k in old_dic.keys():
if k.endswith('''.b''' ):
lowerCamelCase_ =old_dic[k]
elif k.endswith('''.w''' ):
lowerCamelCase_ =old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowerCamelCase_ =old_dic[k]
else:
lowerCamelCase_ =old_dic[k]
lowerCamelCase_ ='''vqvae''' if i == 0 else F'''priors.{3 - i}'''
lowerCamelCase_ =fix_jukebox_keys(__snake_case , model.state_dict() , __snake_case , __snake_case )
weight_dict.append(__snake_case )
lowerCamelCase_ =weight_dict.pop(0 )
model.vqvae.load_state_dict(__snake_case )
for i in range(len(__snake_case ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
with open(F'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile:
json.dump(__snake_case , __snake_case )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__snake_case )
return weight_dict
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
a_ : Optional[int] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 6 | 1 |
'''simple docstring'''
from math import loga
def a__ ( lowerCAmelCase__ ) -> int:
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError('''Input value must be a \'int\' type''' )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 181 |
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : Tuple ):
'''simple docstring'''
debug_launcher(test_script.main )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
debug_launcher(test_ops.main )
| 181 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCAmelCase ( lowerCAmelCase_ ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE,"""width_multiplier""" ) )
class _UpperCAmelCase :
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=13,__SCREAMING_SNAKE_CASE=64,__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE="swish",__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=10,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=0.25,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,):
'''simple docstring'''
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = make_divisible(5_12 * width_multiplier,divisor=8 )
__lowerCAmelCase = hidden_act
__lowerCAmelCase = conv_kernel_size
__lowerCAmelCase = output_stride
__lowerCAmelCase = classifier_dropout_prob
__lowerCAmelCase = use_labels
__lowerCAmelCase = is_training
__lowerCAmelCase = num_labels
__lowerCAmelCase = initializer_range
__lowerCAmelCase = scope
__lowerCAmelCase = width_multiplier
__lowerCAmelCase = ffn_dropout
__lowerCAmelCase = attn_dropout
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size],self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size],self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase__ ( self ):
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_act=self.hidden_act,conv_kernel_size=self.conv_kernel_size,output_stride=self.output_stride,classifier_dropout_prob=self.classifier_dropout_prob,initializer_range=self.initializer_range,width_multiplier=self.width_multiplier,ffn_dropout=self.ffn_dropout_prob,attn_dropout=self.attn_dropout_prob,)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = MobileViTVaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
),)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = MobileViTVaForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = MobileViTVaForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
),)
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
),)
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
a : Optional[int] =(
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
a : Optional[Any] =(
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a : str =False
a : Tuple =False
a : Tuple =False
a : Union[str, Any] =False
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = MobileViTVaModelTester(self )
__lowerCAmelCase = MobileViTVaConfigTester(self,config_class=__SCREAMING_SNAKE_CASE,has_text_modality=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1],__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
def check_hidden_states_output(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = outputs.hidden_states
__lowerCAmelCase = 5
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__lowerCAmelCase = 2
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ),[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],)
divisor *= 2
self.assertEqual(self.model_tester.output_stride,divisor // 2 )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = MobileViTVaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( ) -> List[str]:
__lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
__lowerCAmelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__SCREAMING_SNAKE_CASE,atol=1e-4 ) )
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = outputs.logits
# verify the logits
__lowerCAmelCase = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
],device=__SCREAMING_SNAKE_CASE,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3],__SCREAMING_SNAKE_CASE,atol=1e-4 ) )
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = outputs.logits.detach().cpu()
__lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE,target_sizes=[(50, 60)] )
__lowerCAmelCase = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape,__SCREAMING_SNAKE_CASE )
| 46 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def _lowerCAmelCase ( lowercase ) -> List[Any]:
__lowerCAmelCase = [False] * len(lowercase )
__lowerCAmelCase = [-1] * len(lowercase )
def dfs(lowercase , lowercase ):
__lowerCAmelCase = True
__lowerCAmelCase = c
for u in graph[v]:
if not visited[u]:
dfs(lowercase , 1 - c )
for i in range(len(lowercase ) ):
if not visited[i]:
dfs(lowercase , 0 )
for i in range(len(lowercase ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_a : str = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 46 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCAmelCase__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
lowerCAmelCase__ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowerCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
with open(lowerCamelCase__ , "rb" ) as f:
lowercase__ : Union[str, Any] = Image.open(lowerCamelCase__ )
return im.convert("RGB" )
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = field(
default=_UpperCamelCase , metadata={
"""help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
} , )
lowercase_ = field(
default=_UpperCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
lowercase_ = field(default=_UpperCamelCase , metadata={"""help""": """A folder containing the training data."""} )
lowercase_ = field(default=_UpperCamelCase , metadata={"""help""": """A folder containing the validation data."""} )
lowercase_ = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
lowercase_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowercase_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def snake_case ( self : Union[str, Any] ):
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = field(
default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
lowercase_ = field(
default=_UpperCamelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(_UpperCamelCase )} , )
lowercase_ = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase_ = field(
default=_UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
lowercase_ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowercase_ = field(default=_UpperCamelCase , metadata={"""help""": """Name or path of preprocessor config."""} )
lowercase_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
lowercase_ = field(
default=_UpperCamelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[Any] = torch.stack([example["pixel_values"] for example in examples] )
lowercase__ : Optional[Any] = torch.tensor([example["labels"] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ : Optional[int] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , lowerCamelCase__ , lowerCamelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__ : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase__ )
transformers.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowercase__ : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
lowercase__ : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowercase__ : Any = {}
if data_args.train_dir is not None:
lowercase__ : Any = os.path.join(data_args.train_dir , "**" )
if data_args.validation_dir is not None:
lowercase__ : Optional[Any] = os.path.join(data_args.validation_dir , "**" )
lowercase__ : Any = load_dataset(
"imagefolder" , data_files=lowerCamelCase__ , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase__ : Tuple = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowerCamelCase__ ) and data_args.train_val_split > 0.0:
lowercase__ : List[Any] = dataset["train"].train_test_split(data_args.train_val_split )
lowercase__ : Optional[int] = split["train"]
lowercase__ : Dict = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase__ : List[str] = dataset["train"].features["labels"].names
lowercase__ , lowercase__ : Optional[int] = {}, {}
for i, label in enumerate(lowerCamelCase__ ):
lowercase__ : int = str(lowerCamelCase__ )
lowercase__ : Optional[int] = label
# Load the accuracy metric from the datasets package
lowercase__ : Any = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCamelCase__ ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
lowercase__ : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowerCamelCase__ ) , labelaid=lowerCamelCase__ , idalabel=lowerCamelCase__ , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__ : Optional[Any] = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
lowercase__ : Optional[int] = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
lowercase__ : Dict = image_processor.size["shortest_edge"]
else:
lowercase__ : Any = (image_processor.size["height"], image_processor.size["width"])
lowercase__ : Optional[Any] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
lowercase__ : Any = Compose(
[
RandomResizedCrop(lowerCamelCase__ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
lowercase__ : Dict = Compose(
[
Resize(lowerCamelCase__ ),
CenterCrop(lowerCamelCase__ ),
ToTensor(),
normalize,
] )
def train_transforms(lowerCamelCase__ ):
lowercase__ : Optional[Any] = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(lowerCamelCase__ ):
lowercase__ : Dict = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
lowercase__ : Dict = (
dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(lowerCamelCase__ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
lowercase__ : Any = (
dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(lowerCamelCase__ )
# Initalize our trainer
lowercase__ : Optional[int] = Trainer(
model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=lowerCamelCase__ , tokenizer=lowerCamelCase__ , data_collator=lowerCamelCase__ , )
# Training
if training_args.do_train:
lowercase__ : Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
lowercase__ : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ : int = last_checkpoint
lowercase__ : Optional[int] = trainer.train(resume_from_checkpoint=lowerCamelCase__ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase__ : int = trainer.evaluate()
trainer.log_metrics("eval" , lowerCamelCase__ )
trainer.save_metrics("eval" , lowerCamelCase__ )
# Write model card and (optionally) push to hub
lowercase__ : Dict = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase__ )
else:
trainer.create_model_card(**lowerCamelCase__ )
if __name__ == "__main__":
main()
| 130 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = ['''model.decoder.embed_positions.weights''']
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if "emb" in name:
lowercase__ : int = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
lowercase__ : Any = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
lowercase__ : int = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
lowercase__ : int = name.replace("linear1" , "fc1" )
if "linear2" in name:
lowercase__ : int = name.replace("linear2" , "fc2" )
if "norm1" in name:
lowercase__ : Union[str, Any] = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
lowercase__ : Union[str, Any] = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
lowercase__ : Dict = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
lowercase__ : Dict = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
lowercase__ : Union[str, Any] = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
lowercase__ : Union[str, Any] = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Optional[Any] = list(state_dict.keys() )
lowercase__ : Dict = {}
for key in keys:
lowercase__ : Tuple = state_dict.pop(lowerCamelCase__ )
lowercase__ : Union[str, Any] = rename_keys(lowerCamelCase__ )
if "in_proj_weight" in key:
# split fused qkv proj
lowercase__ : Optional[int] = val[:hidden_size, :]
lowercase__ : Optional[int] = val[hidden_size : 2 * hidden_size, :]
lowercase__ : List[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
lowercase__ : Union[str, Any] = val
else:
lowercase__ : List[Any] = val
return state_dict, enc_dec_proj_state_dict
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if checkpoint == "small":
# default config values
lowercase__ : Optional[Any] = 1_024
lowercase__ : int = 24
lowercase__ : Optional[Any] = 16
elif checkpoint == "medium":
lowercase__ : str = 1_536
lowercase__ : Union[str, Any] = 48
lowercase__ : Optional[int] = 24
elif checkpoint == "large":
lowercase__ : Tuple = 2_048
lowercase__ : Union[str, Any] = 48
lowercase__ : Dict = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
lowercase__ : int = MusicgenDecoderConfig(
hidden_size=lowerCamelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=lowerCamelCase__ , num_attention_heads=lowerCamelCase__ , )
return config
@torch.no_grad()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="cpu" ):
"""simple docstring"""
lowercase__ : List[Any] = MusicGen.get_pretrained(lowerCamelCase__ , device=lowerCamelCase__ )
lowercase__ : str = decoder_config_from_checkpoint(lowerCamelCase__ )
lowercase__ : Optional[Any] = fairseq_model.lm.state_dict()
lowercase__ , lowercase__ : Tuple = rename_state_dict(
lowerCamelCase__ , hidden_size=decoder_config.hidden_size )
lowercase__ : str = TaEncoderModel.from_pretrained("t5-base" )
lowercase__ : Tuple = EncodecModel.from_pretrained("facebook/encodec_32khz" )
lowercase__ : List[str] = MusicgenForCausalLM(lowerCamelCase__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
lowercase__ , lowercase__ : List[str] = decoder.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(lowerCamelCase__ ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
lowercase__ : Any = MusicgenForConditionalGeneration(text_encoder=lowerCamelCase__ , audio_encoder=lowerCamelCase__ , decoder=lowerCamelCase__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowerCamelCase__ )
# check we can do a forward pass
lowercase__ : List[str] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
lowercase__ : Any = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
lowercase__ : List[str] = model(input_ids=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
lowercase__ : List[Any] = AutoTokenizer.from_pretrained("t5-base" )
lowercase__ : Dict = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
lowercase__ : Optional[Any] = MusicgenProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
# set the appropriate bos/pad token ids
lowercase__ : List[Any] = 2_048
lowercase__ : List[Any] = 2_048
# set other default generation config params
lowercase__ : str = int(30 * audio_encoder.config.frame_rate )
lowercase__ : List[Any] = True
lowercase__ : Dict = 3.0
if pytorch_dump_folder is not None:
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(lowerCamelCase__ )
processor.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 130 | 1 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCAmelCase : List[str] = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def lowercase (_A ):
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
lowerCAmelCase : Dict = parser.parse_args()
if args.check_lib:
lowerCAmelCase : Union[str, Any] = importlib.import_module("""transformers""")
lowerCAmelCase : int = Path(transformers_module.__file__).parent
else:
lowerCAmelCase : int = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
| 25 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 25 | 1 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def UpperCAmelCase_ ( __lowerCAmelCase ) -> Dict:
__lowercase : Any = model.config
__lowercase : Any = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
__lowercase : Union[str, Any] = MBartConfig(
is_decoder=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , add_cross_attention=__lowerCAmelCase , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__lowerCAmelCase , add_final_layer_norm=__lowerCAmelCase , )
return encoder_config, decoder_config
def UpperCAmelCase_ ( __lowerCAmelCase ) -> List[Any]:
if "encoder.model" in name:
__lowercase : int = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
__lowercase : Tuple = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
__lowercase : Dict = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
__lowercase : int = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
__lowercase : Optional[Any] = '''encoder.''' + name
if "attn.proj" in name:
__lowercase : Union[str, Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
__lowercase : Tuple = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__lowercase : str = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__lowercase : List[str] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__lowercase : List[str] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__lowercase : Optional[int] = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
__lowercase : Any = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
__lowercase : List[str] = '''encoder.layernorm.bias'''
return name
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
for key in orig_state_dict.copy().keys():
__lowercase : int = orig_state_dict.pop(__lowerCAmelCase )
if "qkv" in key:
__lowercase : Optional[int] = key.split('''.''' )
__lowercase : List[Any] = int(key_split[3] )
__lowercase : Dict = int(key_split[5] )
__lowercase : Optional[Any] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowercase : List[Any] = val[:dim, :]
__lowercase : Any = val[dim : dim * 2, :]
__lowercase : Union[str, Any] = val[-dim:, :]
else:
__lowercase : Union[str, Any] = val[:dim]
__lowercase : Optional[int] = val[dim : dim * 2]
__lowercase : List[str] = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
__lowercase : List[Any] = val
return orig_state_dict
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False ) -> List[str]:
# load original model
__lowercase : Any = DonutModel.from_pretrained(__lowerCAmelCase ).eval()
# load HuggingFace model
__lowercase , __lowercase : List[Any] = get_configs(__lowerCAmelCase )
__lowercase : Tuple = DonutSwinModel(__lowerCAmelCase )
__lowercase : Optional[int] = MBartForCausalLM(__lowerCAmelCase )
__lowercase : str = VisionEncoderDecoderModel(encoder=__lowerCAmelCase , decoder=__lowerCAmelCase )
model.eval()
__lowercase : str = original_model.state_dict()
__lowercase : List[str] = convert_state_dict(__lowerCAmelCase , __lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
# verify results on scanned document
__lowercase : str = load_dataset('''hf-internal-testing/example-documents''' )
__lowercase : str = dataset['''test'''][0]['''image'''].convert('''RGB''' )
__lowercase : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained(__lowerCAmelCase , from_slow=__lowerCAmelCase )
__lowercase : int = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
__lowercase : int = DonutProcessor(__lowerCAmelCase , __lowerCAmelCase )
__lowercase : List[Any] = processor(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
__lowercase : Dict = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
__lowercase : List[str] = '''When is the coffee break?'''
__lowercase : List[Any] = task_prompt.replace('''{user_input}''' , __lowerCAmelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
__lowercase : int = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
__lowercase : Union[str, Any] = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
__lowercase : Union[str, Any] = '''s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
__lowercase : Any = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
__lowercase : Union[str, Any] = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
__lowercase : int = original_model.decoder.tokenizer(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors='''pt''' )[
'''input_ids'''
]
__lowercase : int = original_model.encoder.model.patch_embed(__lowerCAmelCase )
__lowercase , __lowercase : int = model.encoder.embeddings(__lowerCAmelCase )
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 )
# verify encoder hidden states
__lowercase : Optional[int] = original_model.encoder(__lowerCAmelCase )
__lowercase : Tuple = model.encoder(__lowerCAmelCase ).last_hidden_state
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-2 )
# verify decoder hidden states
__lowercase : int = original_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).logits
__lowercase : List[str] = model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase ).logits
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 156 |
def UpperCAmelCase_ ( __lowerCAmelCase ) -> int:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or number < 0:
raise ValueError('''Input must be a non-negative integer''' )
__lowercase : List[str] = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 156 | 1 |
def A_ ( _lowerCAmelCase = 1000 ) -> int:
UpperCamelCase : str = 2**power
UpperCamelCase : str = 0
while n:
UpperCamelCase , UpperCamelCase : Dict = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 140 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__lowerCamelCase : Any = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__lowerCamelCase : str = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
UpperCamelCase : Tuple = numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_lowerCAmelCase )[0]
@deprecated(_lowerCAmelCase , "Please use tf.data to implement this functionality." )
def A_ ( _lowerCAmelCase ) -> int:
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=_lowerCAmelCase ) as bytestream:
UpperCamelCase : Dict = _readaa(_lowerCAmelCase )
if magic != 2051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
UpperCamelCase : Optional[int] = _readaa(_lowerCAmelCase )
UpperCamelCase : int = _readaa(_lowerCAmelCase )
UpperCamelCase : Union[str, Any] = _readaa(_lowerCAmelCase )
UpperCamelCase : List[Any] = bytestream.read(rows * cols * num_images )
UpperCamelCase : List[str] = numpy.frombuffer(_lowerCAmelCase , dtype=numpy.uinta )
UpperCamelCase : Optional[Any] = data.reshape(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
return data
@deprecated(_lowerCAmelCase , "Please use tf.one_hot on tensors." )
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
UpperCamelCase : List[str] = labels_dense.shape[0]
UpperCamelCase : str = numpy.arange(_lowerCAmelCase ) * num_classes
UpperCamelCase : Optional[Any] = numpy.zeros((num_labels, num_classes) )
UpperCamelCase : Dict = 1
return labels_one_hot
@deprecated(_lowerCAmelCase , "Please use tf.data to implement this functionality." )
def A_ ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=10 ) -> str:
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=_lowerCAmelCase ) as bytestream:
UpperCamelCase : int = _readaa(_lowerCAmelCase )
if magic != 2049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
UpperCamelCase : List[str] = _readaa(_lowerCAmelCase )
UpperCamelCase : List[Any] = bytestream.read(_lowerCAmelCase )
UpperCamelCase : List[str] = numpy.frombuffer(_lowerCAmelCase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_lowerCAmelCase , _lowerCAmelCase )
return labels
class A__ :
@deprecated(
A_ , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self , A_ , A_ , A_=False , A_=False , A_=dtypes.floataa , A_=True , A_=None , ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : int = random_seed.get_seed(A_ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
UpperCamelCase : Optional[Any] = dtypes.as_dtype(A_ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
if fake_data:
UpperCamelCase : List[str] = 1_0000
UpperCamelCase : int = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F"""images.shape: {images.shape} labels.shape: {labels.shape}"""
UpperCamelCase : Optional[Any] = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
UpperCamelCase : int = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
UpperCamelCase : str = images.astype(numpy.floataa )
UpperCamelCase : str = numpy.multiply(A_ , 1.0 / 2_55.0 )
UpperCamelCase : Optional[int] = images
UpperCamelCase : str = labels
UpperCamelCase : Optional[Any] = 0
UpperCamelCase : Optional[int] = 0
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self._images
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self._labels
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self._num_examples
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self._epochs_completed
def __UpperCamelCase( self , A_ , A_=False , A_=True ):
'''simple docstring'''
if fake_data:
UpperCamelCase : Optional[int] = [1] * 784
UpperCamelCase : Optional[Any] = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(A_ )],
[fake_label for _ in range(A_ )],
)
UpperCamelCase : Optional[Any] = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
UpperCamelCase : Optional[Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(A_ )
UpperCamelCase : int = self.images[perma]
UpperCamelCase : Any = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
UpperCamelCase : List[Any] = self._num_examples - start
UpperCamelCase : Union[str, Any] = self._images[start : self._num_examples]
UpperCamelCase : str = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
UpperCamelCase : Union[str, Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(A_ )
UpperCamelCase : Union[str, Any] = self.images[perm]
UpperCamelCase : Union[str, Any] = self.labels[perm]
# Start next epoch
UpperCamelCase : Tuple = 0
UpperCamelCase : Tuple = batch_size - rest_num_examples
UpperCamelCase : List[str] = self._index_in_epoch
UpperCamelCase : Dict = self._images[start:end]
UpperCamelCase : int = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
UpperCamelCase : Union[str, Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(_lowerCAmelCase , "Please write your own downloading logic." )
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
if not gfile.Exists(_lowerCAmelCase ):
gfile.MakeDirs(_lowerCAmelCase )
UpperCamelCase : Optional[Any] = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
if not gfile.Exists(_lowerCAmelCase ):
urllib.request.urlretrieve(_lowerCAmelCase , _lowerCAmelCase ) # noqa: S310
with gfile.GFile(_lowerCAmelCase ) as f:
UpperCamelCase : Optional[int] = f.size()
print("Successfully downloaded" , _lowerCAmelCase , _lowerCAmelCase , "bytes." )
return filepath
@deprecated(
_lowerCAmelCase , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def A_ ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=dtypes.floataa , _lowerCAmelCase=True , _lowerCAmelCase=5000 , _lowerCAmelCase=None , _lowerCAmelCase=DEFAULT_SOURCE_URL , ) -> List[str]:
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_lowerCAmelCase , one_hot=_lowerCAmelCase , dtype=_lowerCAmelCase , seed=_lowerCAmelCase )
UpperCamelCase : Any = fake()
UpperCamelCase : List[str] = fake()
UpperCamelCase : Union[str, Any] = fake()
return _Datasets(train=_lowerCAmelCase , validation=_lowerCAmelCase , test=_lowerCAmelCase )
if not source_url: # empty string check
UpperCamelCase : str = DEFAULT_SOURCE_URL
UpperCamelCase : List[str] = "train-images-idx3-ubyte.gz"
UpperCamelCase : Optional[int] = "train-labels-idx1-ubyte.gz"
UpperCamelCase : List[str] = "t10k-images-idx3-ubyte.gz"
UpperCamelCase : Union[str, Any] = "t10k-labels-idx1-ubyte.gz"
UpperCamelCase : Optional[int] = _maybe_download(
_lowerCAmelCase , _lowerCAmelCase , source_url + train_images_file )
with gfile.Open(_lowerCAmelCase , "rb" ) as f:
UpperCamelCase : List[str] = _extract_images(_lowerCAmelCase )
UpperCamelCase : Dict = _maybe_download(
_lowerCAmelCase , _lowerCAmelCase , source_url + train_labels_file )
with gfile.Open(_lowerCAmelCase , "rb" ) as f:
UpperCamelCase : List[Any] = _extract_labels(_lowerCAmelCase , one_hot=_lowerCAmelCase )
UpperCamelCase : Any = _maybe_download(
_lowerCAmelCase , _lowerCAmelCase , source_url + test_images_file )
with gfile.Open(_lowerCAmelCase , "rb" ) as f:
UpperCamelCase : Any = _extract_images(_lowerCAmelCase )
UpperCamelCase : List[str] = _maybe_download(
_lowerCAmelCase , _lowerCAmelCase , source_url + test_labels_file )
with gfile.Open(_lowerCAmelCase , "rb" ) as f:
UpperCamelCase : str = _extract_labels(_lowerCAmelCase , one_hot=_lowerCAmelCase )
if not 0 <= validation_size <= len(_lowerCAmelCase ):
UpperCamelCase : Any = (
"Validation size should be between 0 and "
F"""{len(_lowerCAmelCase )}. Received: {validation_size}."""
)
raise ValueError(_lowerCAmelCase )
UpperCamelCase : str = train_images[:validation_size]
UpperCamelCase : int = train_labels[:validation_size]
UpperCamelCase : List[str] = train_images[validation_size:]
UpperCamelCase : Union[str, Any] = train_labels[validation_size:]
UpperCamelCase : List[str] = {"dtype": dtype, "reshape": reshape, "seed": seed}
UpperCamelCase : List[str] = _DataSet(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
UpperCamelCase : List[str] = _DataSet(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
UpperCamelCase : Any = _DataSet(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
return _Datasets(train=_lowerCAmelCase , validation=_lowerCAmelCase , test=_lowerCAmelCase )
| 140 | 1 |
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
if collection == []:
return []
# get some information about the collection
__lowerCamelCase = len(A__ )
__lowerCamelCase = max(A__ )
__lowerCamelCase = min(A__ )
# create the counting array
__lowerCamelCase = coll_max + 1 - coll_min
__lowerCamelCase = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with it's predecessors. now, counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1 , A__ ):
__lowerCamelCase = counting_arr[i] + counting_arr[i - 1]
# create the output collection
__lowerCamelCase = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , A__ ) ):
__lowerCamelCase = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
return "".join([chr(A__ ) for i in counting_sort([ord(A__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item) for item in user_input.split(',')]
print(counting_sort(unsorted))
| 12 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase_ = 16
UpperCAmelCase_ = 32
def lowerCamelCase__ ( A__ : Accelerator , A__ : int = 16 , A__ : str = "bert-base-cased" ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(A__ )
__lowerCamelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(A__ : int ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowerCamelCase = datasets.map(
A__ , batched=A__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=A__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(A__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(A__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__lowerCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
__lowerCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
def lowerCamelCase__ ( A__ : Tuple , A__ : Union[str, Any] , A__ : Tuple , A__ : Optional[Any] ):
'''simple docstring'''
model.eval()
__lowerCamelCase = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__lowerCamelCase, __lowerCamelCase = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A__ ) - 1:
__lowerCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowerCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A__ , references=A__ , )
__lowerCamelCase = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Optional[int] ):
'''simple docstring'''
__lowerCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase = config["""lr"""]
__lowerCamelCase = int(config["""num_epochs"""] )
__lowerCamelCase = int(config["""seed"""] )
__lowerCamelCase = int(config["""batch_size"""] )
__lowerCamelCase = args.model_name_or_path
set_seed(A__ )
__lowerCamelCase, __lowerCamelCase = get_dataloaders(A__ , A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
__lowerCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowerCamelCase = optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
__lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
__lowerCamelCase = 1
__lowerCamelCase = (len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
__lowerCamelCase = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
__lowerCamelCase = 0
# We also need to keep track of the stating epoch so files are named properly
__lowerCamelCase = 0
__lowerCamelCase = evaluate.load("""glue""" , """mrpc""" )
__lowerCamelCase = num_epochs
if args.partial_train_epoch is not None:
__lowerCamelCase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__lowerCamelCase = args.resume_from_checkpoint.split("""epoch_""" )[1]
__lowerCamelCase = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__lowerCamelCase = int(A__ ) + 1
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
accelerator.print("""resumed checkpoint performance:""" , A__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , """r""" ) as f:
__lowerCamelCase = json.load(A__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__lowerCamelCase = {}
for epoch in range(A__ , A__ ):
model.train()
for step, batch in enumerate(A__ ):
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.loss
__lowerCamelCase = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__lowerCamelCase = f'epoch_{epoch}'
__lowerCamelCase = os.path.join(args.output_dir , A__ )
accelerator.save_state(A__ )
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
__lowerCamelCase = accuracy
__lowerCamelCase = lr_scheduler.get_lr()[0]
__lowerCamelCase = optimizer.param_groups[0]["""lr"""]
__lowerCamelCase = epoch
__lowerCamelCase = overall_step
accelerator.print(f'epoch {epoch}:' , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , """w""" ) as f:
json.dump(A__ , A__ )
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=A__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=A__ , )
parser.add_argument(
"""--output_dir""" , type=A__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=A__ , default=A__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=A__ , default=A__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=A__ , default=2 , help="""Number of train epochs.""" , )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
| 12 | 1 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def __SCREAMING_SNAKE_CASE ( lowercase__=None ):
"""simple docstring"""
if subparsers is not None:
A = subparsers.add_parser("env" )
else:
A = argparse.ArgumentParser("Accelerate env command" )
parser.add_argument(
"--config_file" , default=lowercase__ , help="The config file to use for the default values in the launching script." )
if subparsers is not None:
parser.set_defaults(func=lowercase__ )
return parser
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
A = torch.__version__
A = torch.cuda.is_available()
A = is_xpu_available()
A = is_npu_available()
A = "Not found"
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(lowercase__ ):
A = load_config_from_file(args.config_file ).to_dict()
A = {
"`Accelerate` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Numpy version": np.__version__,
"PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""",
"PyTorch XPU available": str(lowercase__ ),
"PyTorch NPU available": str(lowercase__ ),
"System RAM": F"""{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB""",
}
if pt_cuda_available:
A = torch.cuda.get_device_name()
print("\nCopy-and-paste the text below in your GitHub issue\n" )
print("\n".join([F"""- {prop}: {val}""" for prop, val in info.items()] ) )
print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" )
A = (
"\n".join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(lowercase__ , lowercase__ )
else F"""\t{accelerate_config}"""
)
print(lowercase__ )
A = accelerate_config
return info
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
A = env_command_parser()
A = parser.parse_args()
env_command(lowercase__ )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 57 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
# Load configuration defined in the metadata file
with open(lowercase__ ) as metadata_file:
A = json.load(lowercase__ )
A = LukeConfig(use_entity_aware_attention=lowercase__ , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
A = torch.load(lowercase__ , map_location="cpu" )
# Load the entity vocab file
A = load_entity_vocab(lowercase__ )
A = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
A = AddedToken("<ent>" , lstrip=lowercase__ , rstrip=lowercase__ )
A = AddedToken("<ent2>" , lstrip=lowercase__ , rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(lowercase__ , lowercase__ )
A = LukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
A = state_dict["embeddings.word_embeddings.weight"]
A = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
A = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
A = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A = F"""encoder.layer.{layer_index}.attention.self."""
A = state_dict[prefix + matrix_name]
A = state_dict[prefix + matrix_name]
A = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A = state_dict["entity_embeddings.entity_embeddings.weight"]
A = entity_emb[entity_vocab["[MASK]"]]
A = LukeModel(config=lowercase__ ).eval()
A , A = model.load_state_dict(lowercase__ , strict=lowercase__ )
if not (len(lowercase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"""Missing keys {", ".join(lowercase__ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
F""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
A = LukeTokenizer.from_pretrained(lowercase__ , task="entity_classification" )
A = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
A = (39, 42)
A = tokenizer(lowercase__ , entity_spans=[span] , add_prefix_space=lowercase__ , return_tensors="pt" )
A = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
A = torch.Size((1, 42, 1_024) )
A = torch.tensor(
[[0.01_33, 0.08_65, 0.00_95], [0.30_93, -0.25_76, -0.74_18], [-0.17_20, -0.21_17, -0.28_69]] )
else: # base
A = torch.Size((1, 42, 768) )
A = torch.tensor([[0.00_37, 0.13_68, -0.00_91], [0.10_99, 0.33_29, -0.10_95], [0.07_65, 0.53_35, 0.11_79]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
A = torch.Size((1, 1, 1_024) )
A = torch.tensor([[0.04_66, -0.01_06, -0.01_79]] )
else: # base
A = torch.Size((1, 1, 768) )
A = torch.tensor([[0.14_57, 0.10_44, 0.01_74]] )
if not (outputs.entity_last_hidden_state.shape != expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase__ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
A = {}
with open(lowercase__ , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(lowercase__ ):
A , A = line.rstrip().split("\t" )
A = index
return entity_vocab
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__A : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 57 | 1 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
__A : int = get_tests_dir('''fixtures/test_sentencepiece.model''')
__A : List[str] = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
__A : int = '''>>zh<<'''
__A : Any = '''Helsinki-NLP/'''
if is_torch_available():
__A : List[Any] = '''pt'''
elif is_tf_available():
__A : Union[str, Any] = '''tf'''
else:
__A : Union[str, Any] = '''jax'''
@require_sentencepiece
class _UpperCAmelCase ( _A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Tuple = MarianTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Tuple = True
def A ( self : Dict ) -> Union[str, Any]:
super().setUp()
lowercase_ : Optional[int] = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
lowercase_ : Dict = dict(zip(A , range(len(A ) ) ) )
lowercase_ : Dict = Path(self.tmpdirname )
save_json(A , save_dir / VOCAB_FILES_NAMES['''vocab'''] )
save_json(A , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(A , save_dir / VOCAB_FILES_NAMES['''source_spm'''] )
copyfile(A , save_dir / VOCAB_FILES_NAMES['''target_spm'''] )
lowercase_ : List[Any] = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : str , **A : List[Any] ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **A )
def A ( self : List[str] , A : int ) -> int:
return (
"This is a test",
"This is a test",
)
def A ( self : int ) -> int:
lowercase_ : Tuple = '''</s>'''
lowercase_ : Dict = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def A ( self : List[str] ) -> str:
lowercase_ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(A ) , 9 )
def A ( self : str ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def A ( self : Any ) -> Optional[int]:
lowercase_ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
lowercase_ : Dict = en_de_tokenizer(['''I am a small frog'''] , return_tensors=A )
self.assertIsInstance(A , A )
lowercase_ : Optional[int] = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(A , batch.input_ids[0] )
lowercase_ : Optional[int] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(A )
lowercase_ : int = [x.name for x in Path(A ).glob('''*''' )]
self.assertIn('''source.spm''' , A )
MarianTokenizer.from_pretrained(A )
def A ( self : List[Any] ) -> int:
lowercase_ : int = self.get_tokenizer()
lowercase_ : Optional[Any] = tok(
['''I am a small frog''' * 10_00, '''I am a small frog'''] , padding=A , truncation=A , return_tensors=A )
self.assertIsInstance(A , A )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def A ( self : Optional[int] ) -> str:
lowercase_ : Tuple = self.get_tokenizer()
lowercase_ : List[Any] = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=A , return_tensors=A )
self.assertIsInstance(A , A )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def A ( self : Optional[Any] ) -> Union[str, Any]:
# fmt: off
lowercase_ : Dict = {'''input_ids''': [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )
def A ( self : Optional[int] ) -> List[str]:
lowercase_ : Any = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' )
lowercase_ : Optional[Any] = '''Tämä on testi'''
lowercase_ : Union[str, Any] = '''This is a test'''
lowercase_ : str = [76, 7, 20_47, 2]
lowercase_ : Union[str, Any] = [69, 12, 11, 9_40, 2]
lowercase_ : int = tokenizer(A ).input_ids
self.assertListEqual(A , A )
lowercase_ : int = tokenizer(text_target=A ).input_ids
self.assertListEqual(A , A )
lowercase_ : Optional[Any] = tokenizer.decode(A , skip_special_tokens=A )
self.assertEqual(A , A )
| 33 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__A : List[Any] = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__A : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 33 | 1 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
lowercase : Tuple = logging.get_logger(__name__)
lowercase : Dict = 'T5Config'
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : List[Any] = jnp.zeros_like(snake_case__ )
A : Dict = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
A : Optional[Any] = shifted_input_ids.at[:, 0].set(snake_case__ )
A : Union[str, Any] = jnp.where(shifted_input_ids == -100 , snake_case__ , snake_case__ )
return shifted_input_ids
class A ( __snake_case ):
__magic_name__ = '''mt5'''
__magic_name__ = MTaConfig
class A ( __snake_case ):
__magic_name__ = '''mt5'''
__magic_name__ = MTaConfig
class A ( __snake_case ):
__magic_name__ = '''mt5'''
__magic_name__ = MTaConfig
| 311 |
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> str:
"""simple docstring"""
A : Any = parent
A : List[Any] = batch_size
A : Union[str, Any] = seq_length
A : Any = is_training
A : int = use_input_mask
A : Union[str, Any] = vocab_size
A : List[Any] = hidden_size
A : List[Any] = num_hidden_layers
A : Optional[int] = num_attention_heads
A : str = intermediate_size
A : Tuple = hidden_act
A : Union[str, Any] = hidden_dropout_prob
A : Union[str, Any] = attention_probs_dropout_prob
A : int = max_position_embeddings
A : Optional[int] = initializer_range
A : Any = use_labels
A : Optional[int] = scope
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Optional[int] = None
if self.use_input_mask:
A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Dict = self.get_config()
return config, input_ids, input_mask, token_labels
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
(
(
A
), (
A
), (
A
), (
A
),
) : Any = self.prepare_config_and_inputs()
A : Tuple = True
A : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
A : List[str] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
A : int = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
"""simple docstring"""
A : List[str] = True
A : Union[str, Any] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , )
A : List[Any] = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = True
A : Tuple = True
A : Optional[int] = BertGenerationDecoder(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval()
# first forward pass
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , )
A : Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
A : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
A : Any = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
# select random slice
A : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
A : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
A : Optional[Any] = BertGenerationDecoder(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A, A, A, A : Optional[int] = self.prepare_config_and_inputs()
A : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class A ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
__magic_name__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__magic_name__ = (BertGenerationDecoder,) if is_torch_available() else ()
__magic_name__ = (
{'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : List[str] = BertGenerationEncoderTester(self )
A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A, A, A, A : Tuple = self.model_tester.prepare_config_and_inputs()
A : str = '''bert'''
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
(
(
A
), (
A
), (
A
), (
A
), (
A
), (
A
),
) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
A : Union[str, Any] = None
self.model_tester.create_and_check_model_as_decoder(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Optional[Any] = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Tuple = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
A : Optional[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
A : Dict = model(SCREAMING_SNAKE_CASE )[0]
A : Optional[Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
A : Dict = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Optional[Any] = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
A : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
A : Optional[Any] = model(SCREAMING_SNAKE_CASE )[0]
A : Optional[Any] = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
A : Any = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 311 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
_A : Union[str, Any] = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def __magic_name__ ( __snake_case : str = "mumbai" ) -> Union[str, Any]:
lowercase : Union[str, Any] = BeautifulSoup(requests.get(url + location ).content , "html.parser" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("div" , attrs={"data-tn-component": "organicJob"} ):
lowercase : Any = job.find("a" , attrs={"data-tn-element": "jobTitle"} ).text.strip()
lowercase : str = job.find("span" , {"class": "company"} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F"Job {i:>2} is {job[0]} at {job[1]}")
| 202 | """simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class UpperCamelCase ( enum.Enum ):
lowercase = 0
lowercase = 1
@add_end_docstrings(lowercase_ )
class UpperCamelCase ( lowercase_ ):
lowercase = 'generated'
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Tuple:
'''simple docstring'''
super().__init__(*__UpperCamelCase ,**__UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def _UpperCAmelCase ( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,**__UpperCamelCase ,) -> Optional[Any]:
'''simple docstring'''
lowercase_ : List[Any] = {}
if truncation is not None:
lowercase_ : int = truncation
lowercase_ : Dict = generate_kwargs
lowercase_ : List[Any] = {}
if return_tensors is not None and return_type is None:
lowercase_ : Union[str, Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowercase_ : str = return_type
if clean_up_tokenization_spaces is not None:
lowercase_ : Dict = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase_ : Union[str, Any] = self.tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
if len(__UpperCamelCase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
lowercase_ : Optional[int] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return True
def _UpperCAmelCase ( self ,*__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Dict = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] ,__UpperCamelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
lowercase_ : str = ([prefix + arg for arg in args[0]],)
lowercase_ : Union[str, Any] = True
elif isinstance(args[0] ,__UpperCamelCase ):
lowercase_ : Union[str, Any] = (prefix + args[0],)
lowercase_ : Union[str, Any] = False
else:
raise ValueError(
f''' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`''' )
lowercase_ : List[Any] = self.tokenizer(*__UpperCamelCase ,padding=__UpperCamelCase ,truncation=__UpperCamelCase ,return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Optional[int] = super().__call__(*__UpperCamelCase ,**__UpperCamelCase )
if (
isinstance(args[0] ,__UpperCamelCase )
and all(isinstance(__UpperCamelCase ,__UpperCamelCase ) for el in args[0] )
and all(len(__UpperCamelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=TruncationStrategy.DO_NOT_TRUNCATE ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
lowercase_ : Any = self._parse_and_tokenize(__UpperCamelCase ,truncation=__UpperCamelCase ,**__UpperCamelCase )
return inputs
def _UpperCAmelCase ( self ,__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
if self.framework == "pt":
lowercase_ , lowercase_ : Optional[int] = model_inputs['input_ids'].shape
elif self.framework == "tf":
lowercase_ , lowercase_ : Union[str, Any] = tf.shape(model_inputs['input_ids'] ).numpy()
lowercase_ : str = generate_kwargs.get('min_length' ,self.model.config.min_length )
lowercase_ : List[Any] = generate_kwargs.get('max_length' ,self.model.config.max_length )
self.check_inputs(__UpperCamelCase ,generate_kwargs['min_length'] ,generate_kwargs['max_length'] )
lowercase_ : Tuple = self.model.generate(**__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : str = output_ids.shape[0]
if self.framework == "pt":
lowercase_ : List[Any] = output_ids.reshape(__UpperCamelCase ,out_b // in_b ,*output_ids.shape[1:] )
elif self.framework == "tf":
lowercase_ : List[Any] = tf.reshape(__UpperCamelCase ,(in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=ReturnType.TEXT ,__UpperCamelCase=False ) -> Dict:
'''simple docstring'''
lowercase_ : Dict = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowercase_ : List[Any] = {f'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
lowercase_ : str = {
f'''{self.return_name}_text''': self.tokenizer.decode(
__UpperCamelCase ,skip_special_tokens=__UpperCamelCase ,clean_up_tokenization_spaces=__UpperCamelCase ,)
}
records.append(__UpperCamelCase )
return records
@add_end_docstrings(lowercase_ )
class UpperCamelCase ( lowercase_ ):
lowercase = 'summary'
def __call__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return super().__call__(*__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> bool:
'''simple docstring'''
if max_length < min_length:
logger.warning(f'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
'a summarization task, where outputs shorter than the input are typically wanted, you might '
f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(lowercase_ )
class UpperCamelCase ( lowercase_ ):
lowercase = 'translation'
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def _UpperCAmelCase ( self ,*__UpperCamelCase ,__UpperCamelCase=TruncationStrategy.DO_NOT_TRUNCATE ,__UpperCamelCase=None ,__UpperCamelCase=None ) -> int:
'''simple docstring'''
if getattr(self.tokenizer ,'_build_translation_inputs' ,__UpperCamelCase ):
return self.tokenizer._build_translation_inputs(
*__UpperCamelCase ,return_tensors=self.framework ,truncation=__UpperCamelCase ,src_lang=__UpperCamelCase ,tgt_lang=__UpperCamelCase )
else:
return super()._parse_and_tokenize(*__UpperCamelCase ,truncation=__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,**__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ : int = super()._sanitize_parameters(**__UpperCamelCase )
if src_lang is not None:
lowercase_ : str = src_lang
if tgt_lang is not None:
lowercase_ : Optional[Any] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowercase_ : Tuple = kwargs.get('task' ,self.task )
lowercase_ : List[str] = task.split('_' )
if task and len(__UpperCamelCase ) == 4:
# translation, XX, to YY
lowercase_ : Union[str, Any] = items[1]
lowercase_ : Tuple = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
return super().__call__(*__UpperCamelCase ,**__UpperCamelCase )
| 213 | 0 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase__ = logging.getLogger()
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("-f" )
lowercase__ : Any = parser.parse_args()
return args.f
class snake_case__(__snake_case ):
"""simple docstring"""
def snake_case ( self : Any ):
lowercase__ : str = logging.StreamHandler(sys.stdout )
logger.addHandler(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : List[str] = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(SCREAMING_SNAKE_CASE , "argv" , SCREAMING_SNAKE_CASE ):
lowercase__ : List[Any] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(SCREAMING_SNAKE_CASE , 0.666 )
@slow
@require_torch_non_multi_gpu
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(SCREAMING_SNAKE_CASE )
| 362 |
def __lowerCamelCase ( lowerCamelCase__ = 1_000 ):
"""simple docstring"""
lowercase__ , lowercase__ : int = 1, 1
lowercase__ : List[Any] = []
for i in range(1 , n + 1 ):
lowercase__ : Dict = prev_numerator + 2 * prev_denominator
lowercase__ : Tuple = prev_numerator + prev_denominator
if len(str(lowerCamelCase__ ) ) > len(str(lowerCamelCase__ ) ):
result.append(lowerCamelCase__ )
lowercase__ : int = numerator
lowercase__ : int = denominator
return len(lowerCamelCase__ )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 121 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : List[str] = DanceDiffusionPipeline
_lowerCAmelCase : Tuple = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_lowerCAmelCase : int = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
_lowerCAmelCase : Optional[Any] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_lowerCAmelCase : Any = False
_lowerCAmelCase : List[Any] = False
def _snake_case ( self : List[str] ):
torch.manual_seed(0 )
snake_case_ : Tuple = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowercase_ , use_timestep_embedding=lowercase_ , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
snake_case_ : Dict = IPNDMScheduler()
snake_case_ : List[str] = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def _snake_case ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Tuple=0 ):
if str(lowercase_ ).startswith('''mps''' ):
snake_case_ : Any = torch.manual_seed(lowercase_ )
else:
snake_case_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
snake_case_ : str = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def _snake_case ( self : Dict ):
snake_case_ : Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ : Tuple = self.get_dummy_components()
snake_case_ : int = DanceDiffusionPipeline(**lowercase_ )
snake_case_ : int = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ : Union[str, Any] = self.get_dummy_inputs(lowercase_ )
snake_case_ : str = pipe(**lowercase_ )
snake_case_ : Optional[int] = output.audios
snake_case_ : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
snake_case_ : List[Any] = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _snake_case ( self : Optional[Any] ):
return super().test_save_load_local()
@skip_mps
def _snake_case ( self : Dict ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def _snake_case ( self : Any ):
return super().test_save_load_optional_components()
@skip_mps
def _snake_case ( self : Tuple ):
return super().test_attention_slicing_forward_pass()
def _snake_case ( self : Union[str, Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : Tuple ):
snake_case_ : str = torch_device
snake_case_ : int = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
snake_case_ : str = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ : Optional[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(generator=lowercase_ , num_inference_steps=100 , audio_length_in_s=4.0_96 )
snake_case_ : Tuple = output.audios
snake_case_ : Union[str, Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
snake_case_ : Optional[Any] = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self : Union[str, Any] ):
snake_case_ : Union[str, Any] = torch_device
snake_case_ : Tuple = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa )
snake_case_ : List[str] = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ : Dict = torch.manual_seed(0 )
snake_case_ : Tuple = pipe(generator=lowercase_ , num_inference_steps=100 , audio_length_in_s=4.0_96 )
snake_case_ : List[Any] = output.audios
snake_case_ : Union[str, Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
snake_case_ : Optional[Any] = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 264 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ : List[Any] = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Any = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : int = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowercase__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 264 | 1 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=1_8 , _lowerCamelCase=3_0 , _lowerCamelCase=4_0_0 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , ):
UpperCamelCase_: Tuple = size if size is not None else {"height": 1_8, "width": 1_8}
UpperCamelCase_: Optional[Any] = parent
UpperCamelCase_: Optional[Any] = batch_size
UpperCamelCase_: str = num_channels
UpperCamelCase_: Optional[int] = image_size
UpperCamelCase_: Union[str, Any] = min_resolution
UpperCamelCase_: int = max_resolution
UpperCamelCase_: Optional[int] = do_resize
UpperCamelCase_: Dict = size
UpperCamelCase_: Optional[int] = do_normalize
def _a ( self ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : List[Any] =ImageGPTImageProcessor if is_vision_available() else None
def _a ( self ):
UpperCamelCase_: Tuple = ImageGPTImageProcessingTester(self )
@property
def _a ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ):
UpperCamelCase_: List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , 'clusters' ) )
self.assertTrue(hasattr(_a , 'do_resize' ) )
self.assertTrue(hasattr(_a , 'size' ) )
self.assertTrue(hasattr(_a , 'do_normalize' ) )
def _a ( self ):
UpperCamelCase_: Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 1_8, 'width': 1_8} )
UpperCamelCase_: Any = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} )
def _a ( self ):
UpperCamelCase_: str = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase_: str = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(_a , obj[key] ) )
else:
self.assertEqual(obj[key] , _a )
def _a ( self ):
UpperCamelCase_: str = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase_: Optional[int] = os.path.join(_a , 'image_processor.json' )
image_processor_first.to_json_file(_a )
UpperCamelCase_: Union[str, Any] = self.image_processing_class.from_json_file(_a ).to_dict()
UpperCamelCase_: Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_a , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _a )
def _a ( self ):
UpperCamelCase_: int = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(_a )
UpperCamelCase_: Optional[int] = self.image_processing_class.from_pretrained(_a ).to_dict()
UpperCamelCase_: int = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_a , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _a )
@unittest.skip('ImageGPT requires clusters at initialization' )
def _a ( self ):
pass
def snake_case () -> Union[str, Any]:
UpperCamelCase_: str = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
UpperCamelCase_: List[Any] = Image.open(dataset[4]['file'] )
UpperCamelCase_: Tuple = Image.open(dataset[5]['file'] )
UpperCamelCase_: Dict = [imagea, imagea]
return images
@require_vision
@require_torch
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ):
UpperCamelCase_: Optional[Any] = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
UpperCamelCase_: Optional[Any] = prepare_images()
# test non-batched
UpperCamelCase_: str = image_processing(images[0] , return_tensors='pt' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) )
UpperCamelCase_: List[str] = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , _a )
# test batched
UpperCamelCase_: int = image_processing(_a , return_tensors='pt' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) )
UpperCamelCase_: Dict = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , _a ) | 369 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> np.array:
UpperCamelCase_: Dict = F'''{sampling_rate}'''
UpperCamelCase_: Any = '1'
UpperCamelCase_: Any = 'f32le'
UpperCamelCase_: Union[str, Any] = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(UpperCAmelCase__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
UpperCamelCase_: Optional[Any] = ffmpeg_process.communicate(UpperCAmelCase__ )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
UpperCamelCase_: Union[str, Any] = output_stream[0]
UpperCamelCase_: List[str] = np.frombuffer(UpperCAmelCase__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = "f32le" , ) -> Tuple:
UpperCamelCase_: Any = F'''{sampling_rate}'''
UpperCamelCase_: Union[str, Any] = '1'
if format_for_conversion == "s16le":
UpperCamelCase_: Optional[Any] = 2
elif format_for_conversion == "f32le":
UpperCamelCase_: Any = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
UpperCamelCase_: int = platform.system()
if system == "Linux":
UpperCamelCase_: Tuple = 'alsa'
UpperCamelCase_: List[str] = 'default'
elif system == "Darwin":
UpperCamelCase_: int = 'avfoundation'
UpperCamelCase_: Union[str, Any] = ':0'
elif system == "Windows":
UpperCamelCase_: Tuple = 'dshow'
UpperCamelCase_: Dict = 'default'
UpperCamelCase_: Any = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
UpperCamelCase_: Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
UpperCamelCase_: Optional[int] = _ffmpeg_stream(UpperCAmelCase__ , UpperCAmelCase__ )
for item in iterator:
yield item
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = "f32le" , ) -> Any:
if stream_chunk_s is not None:
UpperCamelCase_: List[Any] = stream_chunk_s
else:
UpperCamelCase_: Dict = chunk_length_s
UpperCamelCase_: List[str] = ffmpeg_microphone(UpperCAmelCase__ , UpperCAmelCase__ , format_for_conversion=UpperCAmelCase__ )
if format_for_conversion == "s16le":
UpperCamelCase_: Union[str, Any] = np.intaa
UpperCamelCase_: List[Any] = 2
elif format_for_conversion == "f32le":
UpperCamelCase_: str = np.floataa
UpperCamelCase_: Tuple = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
UpperCamelCase_: int = chunk_length_s / 6
UpperCamelCase_: Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(UpperCAmelCase__ , (int, float) ):
UpperCamelCase_: Union[str, Any] = [stride_length_s, stride_length_s]
UpperCamelCase_: Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
UpperCamelCase_: Dict = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
UpperCamelCase_: Optional[int] = datetime.datetime.now()
UpperCamelCase_: Optional[int] = datetime.timedelta(seconds=UpperCAmelCase__ )
for item in chunk_bytes_iter(UpperCAmelCase__ , UpperCAmelCase__ , stride=(stride_left, stride_right) , stream=UpperCAmelCase__ ):
# Put everything back in numpy scale
UpperCamelCase_: Tuple = np.frombuffer(item['raw'] , dtype=UpperCAmelCase__ )
UpperCamelCase_: Optional[int] = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
UpperCamelCase_: int = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 1_0 * delta:
# We're late !! SKIP
continue
yield item
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = False ) -> int:
UpperCamelCase_: str = b''
UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
UpperCamelCase_: List[str] = 0
for raw in iterator:
acc += raw
if stream and len(UpperCAmelCase__ ) < chunk_len:
UpperCamelCase_: Optional[Any] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(UpperCAmelCase__ ) >= chunk_len:
# We are flushing the accumulator
UpperCamelCase_: int = (_stride_left, stride_right)
UpperCamelCase_: Optional[Any] = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
UpperCamelCase_: Any = False
yield item
UpperCamelCase_: Optional[int] = stride_left
UpperCamelCase_: Optional[Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(UpperCAmelCase__ ) > stride_left:
UpperCamelCase_: int = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
UpperCamelCase_: Optional[Any] = False
yield item
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> int:
UpperCamelCase_: Any = 2**2_4 # 16Mo
try:
with subprocess.Popen(UpperCAmelCase__ , stdout=subprocess.PIPE , bufsize=UpperCAmelCase__ ) as ffmpeg_process:
while True:
UpperCamelCase_: Any = ffmpeg_process.stdout.read(UpperCAmelCase__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error | 292 | 0 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowercase__ : Optional[int] = logging.getLogger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int]=None ) -> int:
'''simple docstring'''
super().__init__(
lowerCAmelCase__ , question_encoder_tokenizer=lowerCAmelCase__ , generator_tokenizer=lowerCAmelCase__ , index=lowerCAmelCase__ , init_retrieval=lowerCAmelCase__ , )
_UpperCamelCase = None
def snake_case__ ( self : List[str] , lowerCAmelCase__ : int ) -> int:
'''simple docstring'''
logger.info('''initializing retrieval''' )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('''dist initialized''' )
# needs to be set manually
_UpperCamelCase = self._infer_socket_ifname()
# avoid clash with the NCCL port
_UpperCamelCase = str(distributed_port + 1 )
_UpperCamelCase = dist.new_group(ranks=lowerCAmelCase__ , backend='''gloo''' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('''dist not initialized / main''' )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def snake_case__ ( self : int ) -> List[Any]:
'''simple docstring'''
return dist.get_rank(group=self.process_group ) == 0
def snake_case__ ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str]=torch.floataa ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = torch.empty(lowerCAmelCase__ , dtype=lowerCAmelCase__ )
dist.scatter(lowerCAmelCase__ , src=0 , scatter_list=lowerCAmelCase__ , group=self.process_group )
return target_tensor
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_UpperCamelCase = next((addr for addr in addrs if addr.startswith('''e''' )) , lowerCAmelCase__ )
return ifname
def snake_case__ ( self : str , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : int ) -> Tuple[np.ndarray, List[dict]]:
'''simple docstring'''
if not dist.is_initialized():
_UpperCamelCase , _UpperCamelCase = self._main_retrieve(lowerCAmelCase__ , lowerCAmelCase__ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowerCAmelCase__ )
# distributed training
_UpperCamelCase = dist.get_world_size(group=self.process_group )
# gather logic
_UpperCamelCase = None
if self._is_main():
_UpperCamelCase = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(lowerCAmelCase__ )]
dist.gather(torch.tensor(lowerCAmelCase__ ) , dst=0 , gather_list=lowerCAmelCase__ , group=self.process_group )
# scatter logic
_UpperCamelCase = question_hidden_states.shape[0]
_UpperCamelCase = []
_UpperCamelCase = []
if self._is_main():
assert len(lowerCAmelCase__ ) == world_size
_UpperCamelCase , _UpperCamelCase = self._main_retrieve(torch.cat(lowerCAmelCase__ ).numpy() , lowerCAmelCase__ )
_UpperCamelCase , _UpperCamelCase = torch.tensor(lowerCAmelCase__ ), torch.tensor(lowerCAmelCase__ )
_UpperCamelCase = self._chunk_tensor(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = self._chunk_tensor(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = self._scattered(lowerCAmelCase__ , [n_queries, n_docs] , target_type=torch.intaa )
_UpperCamelCase = self._scattered(lowerCAmelCase__ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(lowerCAmelCase__ )
| 324 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowercase__ : str = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Union[str, Any] = ['pixel_values']
def __init__( self : Optional[Any] , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Dict[str, int]] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[int, float] = 1 / 255 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> None:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 256}
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self : Tuple , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_UpperCamelCase = get_resize_output_image_size(lowerCAmelCase__ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase__ )
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(lowerCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(lowerCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : Dict , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Tuple ) -> np.ndarray:
'''simple docstring'''
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : str , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Any , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[float] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase__ : Optional[Any] , ) -> Any:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' )
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(lowerCAmelCase__ ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
def snake_case__ ( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Tuple] = None ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowerCAmelCase__ ):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(lowerCAmelCase__ ) ):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase__ )
_UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase__ )
else:
_UpperCamelCase = logits.argmax(dim=1 )
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 324 | 1 |
lowerCAmelCase : Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCAmelCase : List[Any] = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCAmelCase : Union[str, Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 352 |
import torch
from diffusers import StableDiffusionPipeline
lowerCAmelCase : Any = """path-to-your-trained-model"""
lowerCAmelCase : int = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
lowerCAmelCase : Union[str, Any] = """A photo of sks dog in a bucket"""
lowerCAmelCase : Any = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 127 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
A : Optional[Any] = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ["ViTFeatureExtractor"]
A : int = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Dict = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def UpperCamelCase ( __magic_name__ : Any ) -> Optional[int]:
"""simple docstring"""
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def UpperCamelCase ( __magic_name__ : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = create_tensor(__magic_name__ )
lowercase__ = gather(__magic_name__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def UpperCamelCase ( __magic_name__ : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ = [state.process_index]
lowercase__ = gather_object(__magic_name__ )
assert len(__magic_name__ ) == state.num_processes, f'''{gathered_obj}, {len(__magic_name__ )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def UpperCamelCase ( __magic_name__ : str ) -> Dict:
"""simple docstring"""
lowercase__ = create_tensor(__magic_name__ )
lowercase__ = broadcast(__magic_name__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def UpperCamelCase ( __magic_name__ : str ) -> Dict:
"""simple docstring"""
if state.is_main_process:
lowercase__ = torch.arange(state.num_processes + 1 ).to(state.device )
else:
lowercase__ = torch.arange(state.num_processes ).to(state.device )
lowercase__ = pad_across_processes(__magic_name__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def UpperCamelCase ( __magic_name__ : List[Any] ) -> Optional[int]:
"""simple docstring"""
if state.num_processes != 2:
return
lowercase__ = create_tensor(__magic_name__ )
lowercase__ = reduce(__magic_name__ , """sum""" )
lowercase__ = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(__magic_name__ , __magic_name__ ), f'''{reduced_tensor} != {truth_tensor}'''
def UpperCamelCase ( __magic_name__ : Dict ) -> int:
"""simple docstring"""
if state.num_processes != 2:
return
lowercase__ = create_tensor(__magic_name__ )
lowercase__ = reduce(__magic_name__ , """mean""" )
lowercase__ = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(__magic_name__ , __magic_name__ ), f'''{reduced_tensor} != {truth_tensor}'''
def UpperCamelCase ( __magic_name__ : str ) -> int:
"""simple docstring"""
main()
def UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
lowercase__ = PartialState()
state.print(f'''State: {state}''' )
state.print("""testing gather""" )
test_gather(__magic_name__ )
state.print("""testing gather_object""" )
test_gather_object(__magic_name__ )
state.print("""testing broadcast""" )
test_broadcast(__magic_name__ )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(__magic_name__ )
state.print("""testing reduce_sum""" )
test_reduce_sum(__magic_name__ )
state.print("""testing reduce_mean""" )
test_reduce_mean(__magic_name__ )
if __name__ == "__main__":
main()
| 305 | 0 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : Optional[int] = (DDPMParallelScheduler,)
def __A ( self , **UpperCAmelCase__ ):
A__ = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**_a )
return config
def __A ( self ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def __A ( self ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def __A ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_a )
def __A ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_a )
def __A ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __A ( self ):
self.check_over_configs(thresholding=_a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_a , prediction_type=_a , sample_max_value=_a , )
def __A ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def __A ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_a )
def __A ( self ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def __A ( self ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**_a )
A__ = len(_a )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
A__ = self.dummy_sample_deter + 0.1
A__ = self.dummy_sample_deter - 0.1
A__ = samplea.shape[0]
A__ = torch.stack([samplea, samplea, samplea] , dim=0 )
A__ = torch.arange(_a )[0:3, None].repeat(1 , _a )
A__ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
A__ = scheduler.batch_step_no_noise(_a , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
A__ = torch.sum(torch.abs(_a ) )
A__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 1_153.1_833 ) < 1e-2
assert abs(result_mean.item() - 0.5_005 ) < 1e-3
def __A ( self ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**_a )
A__ = len(_a )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
A__ = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
A__ = model(_a , _a )
# 2. predict previous mean of sample x_t-1
A__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
A__ = pred_prev_sample
A__ = torch.sum(torch.abs(_a ) )
A__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.9_606 ) < 1e-2
assert abs(result_mean.item() - 0.3_372 ) < 1e-3
def __A ( self ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(prediction_type="v_prediction" )
A__ = scheduler_class(**_a )
A__ = len(_a )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
A__ = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
A__ = model(_a , _a )
# 2. predict previous mean of sample x_t-1
A__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
A__ = pred_prev_sample
A__ = torch.sum(torch.abs(_a ) )
A__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 202.0_296 ) < 1e-2
assert abs(result_mean.item() - 0.2_631 ) < 1e-3
def __A ( self ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**_a )
A__ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_a )
A__ = scheduler.timesteps
for i, timestep in enumerate(_a ):
if i == len(_a ) - 1:
A__ = -1
else:
A__ = timesteps[i + 1]
A__ = scheduler.previous_timestep(_a )
A__ = prev_t.item()
self.assertEqual(_a , _a )
def __A ( self ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**_a )
A__ = [100, 87, 50, 51, 0]
with self.assertRaises(_a , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=_a )
def __A ( self ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**_a )
A__ = [100, 87, 50, 1, 0]
A__ = len(_a )
with self.assertRaises(_a , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def __A ( self ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**_a )
A__ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_a , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=_a )
| 369 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCamelCase ( _A : Optional[int] )-> List[Any]:
"""simple docstring"""
A__ = FileLock(str(tmpdir / "foo.lock" ) )
A__ = FileLock(str(tmpdir / "foo.lock" ) )
A__ = 0.01
with locka.acquire():
with pytest.raises(_A ):
A__ = time.time()
locka.acquire(_A )
assert time.time() - _start > timeout
def UpperCamelCase ( _A : str )-> List[Any]:
"""simple docstring"""
A__ = "a" * 1000 + ".lock"
A__ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(_A )
assert len(os.path.basename(locka._lock_file ) ) <= 255
A__ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(_A ):
locka.acquire(0 )
| 198 | 0 |
from collections import deque
from .hash_table import HashTable
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(*lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : int = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(lowercase )
A_ : int = self.values[key]
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return (
sum(self.charge_factor - len(lowercase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def lowerCAmelCase_ ( self , lowercase , lowercase=None ):
"""simple docstring"""
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(lowercase ) == 0
):
return key
return super()._collision_resolution(lowercase , lowercase )
| 140 | import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_UpperCAmelCase = 2
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , *, # begin keyword-only arguments
lowercase="<s>" , lowercase="<pad>" , lowercase="</s>" , lowercase="<unk>" , lowercase=None , ):
"""simple docstring"""
A_ , A_ , A_ , A_ : Tuple = bos, unk, pad, eos
A_ : Optional[Any] = []
A_ : Dict = []
A_ : List[Any] = {}
A_ : int = self.add_symbol(lowercase )
A_ : Union[str, Any] = self.add_symbol(lowercase )
A_ : Union[str, Any] = self.add_symbol(lowercase )
A_ : Any = self.add_symbol(lowercase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(lowercase )
A_ : Tuple = len(self.symbols )
def __eq__( self , lowercase ):
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self , lowercase ):
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ):
"""simple docstring"""
return len(self.symbols )
def __contains__( self , lowercase ):
"""simple docstring"""
return sym in self.indices
@classmethod
def lowerCAmelCase_ ( cls , lowercase ):
"""simple docstring"""
A_ : int = cls()
d.add_from_file(lowercase )
return d
def lowerCAmelCase_ ( self , lowercase , lowercase=1 , lowercase=False ):
"""simple docstring"""
if word in self.indices and not overwrite:
A_ : List[Any] = self.indices[word]
A_ : List[str] = self.count[idx] + n
return idx
else:
A_ : int = len(self.symbols )
A_ : Optional[Any] = idx
self.symbols.append(lowercase )
self.count.append(lowercase )
return idx
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
return 0
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if isinstance(lowercase , lowercase ):
try:
with open(lowercase , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(lowercase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(lowercase ) )
return
A_ : Any = f.readlines()
A_ : List[Any] = self._load_meta(lowercase )
for line in lines[indices_start_line:]:
try:
A_ , A_ : int = line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
A_ : Optional[int] = True
A_ , A_ : str = line.rsplit(' ' , 1 )
else:
A_ : Optional[int] = False
A_ : Optional[int] = int(lowercase )
A_ : Tuple = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(lowercase ) )
self.add_symbol(lowercase , n=lowercase , overwrite=lowercase )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def UpperCamelCase ( __lowercase : Any ):
'''simple docstring'''
A_ : Optional[Any] = dict((re.sub(r'@@$' ,'' ,__lowercase ), v) if k.endswith('@@' ) else (re.sub(r'$' ,'</w>' ,__lowercase ), v) for k, v in d.items() )
A_ : Optional[Any] = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
A_ : Union[str, Any] = d[k] # restore
return da
def UpperCamelCase ( __lowercase : Any ,__lowercase : str ):
'''simple docstring'''
if not os.path.exists(__lowercase ):
raise ValueError(f'''path {biogpt_checkpoint_path} does not exist!''' )
os.makedirs(__lowercase ,exist_ok=__lowercase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
A_ : Optional[Any] = os.path.join(__lowercase ,'checkpoint.pt' )
if not os.path.isfile(__lowercase ):
raise ValueError(f'''path to the file {checkpoint_file} does not exist!''' )
A_ : Any = torch.load(__lowercase ,map_location='cpu' )
A_ : str = chkpt['cfg']['model']
# dicts
A_ : Any = os.path.join(__lowercase ,'dict.txt' )
if not os.path.isfile(__lowercase ):
raise ValueError(f'''path to the file {dict_file} does not exist!''' )
A_ : Optional[int] = Dictionary.load(__lowercase )
A_ : Union[str, Any] = rewrite_dict_keys(src_dict.indices )
A_ : List[Any] = len(__lowercase )
A_ : Tuple = os.path.join(__lowercase ,VOCAB_FILES_NAMES['vocab_file'] )
print(f'''Generating {src_vocab_file} of {src_vocab_size} records''' )
with open(__lowercase ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(__lowercase ,ensure_ascii=__lowercase ,indent=__lowercase ) )
# merges_file (bpecodes)
A_ : List[Any] = os.path.join(__lowercase ,'bpecodes' )
if not os.path.isfile(__lowercase ):
raise ValueError(f'''path to the file {bpecodes_file} does not exist!''' )
A_ : Optional[Any] = os.path.join(__lowercase ,VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(__lowercase ,__lowercase )
# model config
A_ : Dict = os.path.join(__lowercase ,'config.json' )
A_ : List[Any] = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1e-1_2,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(f'''Generating {biogpt_model_config_file}''' )
with open(__lowercase ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(__lowercase ,ensure_ascii=__lowercase ,indent=__lowercase ) )
# tokenizer config
A_ : List[Any] = os.path.join(__lowercase ,__lowercase )
A_ : Dict = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 10_24,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(f'''Generating {biogpt_tokenizer_config_file}''' )
with open(__lowercase ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(__lowercase ,ensure_ascii=__lowercase ,indent=__lowercase ) )
# model
A_ : Any = chkpt['model']
# remove unneeded keys
A_ : List[Any] = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(__lowercase ,__lowercase )
A_ : int = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
A_ : Union[str, Any] = model_state_dict.pop(__lowercase )
else:
A_ : str = model_state_dict.pop(__lowercase )
A_ : Optional[int] = BioGptConfig.from_pretrained(__lowercase )
A_ : List[Any] = BioGptForCausalLM(__lowercase )
# check that it loads ok
model_new.load_state_dict(__lowercase )
# save
A_ : List[str] = os.path.join(__lowercase ,__lowercase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(__lowercase ,__lowercase )
print('Conversion is done!' )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_UpperCAmelCase = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 140 | 1 |
"""simple docstring"""
from statistics import mean
import numpy as np
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = 0
# Number of processes finished
_lowerCamelCase : List[str] = 0
# Displays the finished process.
# If it is 0, the performance is completed if it is 1, before the performance.
_lowerCamelCase : List[Any] = [0] * no_of_process
# List to include calculation results
_lowerCamelCase : Dict = [0] * no_of_process
# Sort by arrival time.
_lowerCamelCase : Union[str, Any] = [burst_time[i] for i in np.argsort(_lowerCamelCase )]
_lowerCamelCase : Union[str, Any] = [process_name[i] for i in np.argsort(_lowerCamelCase )]
arrival_time.sort()
while no_of_process > finished_process_count:
_lowerCamelCase : Union[str, Any] = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
_lowerCamelCase : Any = arrival_time[i]
_lowerCamelCase : Optional[Any] = 0
# Index showing the location of the process being performed
_lowerCamelCase : Union[str, Any] = 0
# Saves the current response ratio.
_lowerCamelCase : List[str] = 0
for i in range(0 , _lowerCamelCase ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
_lowerCamelCase : List[Any] = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
_lowerCamelCase : Optional[Any] = temp
_lowerCamelCase : Optional[Any] = i
# Calculate the turn around time
_lowerCamelCase : List[str] = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
_lowerCamelCase : Dict = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCamelCase : List[str] = [0] * no_of_process
for i in range(0 , _lowerCamelCase ):
_lowerCamelCase : Optional[int] = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
_lowerCAmelCase : Any = 5
_lowerCAmelCase : int = ['''A''', '''B''', '''C''', '''D''', '''E''']
_lowerCAmelCase : Union[str, Any] = [1, 2, 3, 4, 5]
_lowerCAmelCase : Dict = [1, 2, 3, 4, 5]
_lowerCAmelCase : Optional[Any] = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
_lowerCAmelCase : int = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(f'''average waiting time : {mean(waiting_time):.5f}''')
print(f'''average turn around time : {mean(turn_around_time):.5f}''') | 350 |
"""simple docstring"""
from collections import defaultdict
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Optional[int] = 1
_lowerCamelCase : str = True
for v in tree[start]:
if v not in visited:
ret += dfs(_lowerCamelCase )
if ret % 2 == 0:
cuts.append(_lowerCamelCase )
return ret
def lowerCamelCase_( ) -> int:
'''simple docstring'''
dfs(1 )
if __name__ == "__main__":
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = 10, 9
_lowerCAmelCase : str = defaultdict(list)
_lowerCAmelCase : dict[int, bool] = {}
_lowerCAmelCase : list[int] = []
_lowerCAmelCase : Any = 0
_lowerCAmelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1) | 340 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 153 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Dict=2_24 , lowerCamelCase_ : List[Any]=30 , lowerCamelCase_ : Union[str, Any]=4_00 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Dict = min_resolution
SCREAMING_SNAKE_CASE : List[str] = max_resolution
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean
SCREAMING_SNAKE_CASE : str = image_std
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ViTImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = EfficientFormerImageProcessorTester(self )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Tuple = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 323 | 0 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : Optional[Any], __snake_case : Optional[Any]=None, **__snake_case : Tuple ) -> str:
"""simple docstring"""
A__ : Any =[x.strip() for x in open(__snake_case ).readlines()]
A__ : int =[x.strip() for x in open(__snake_case ).readlines()][: len(__snake_case )]
A__ : Union[str, Any] =calculate_rouge(__snake_case, __snake_case, **__snake_case )
if save_path is not None:
save_json(__snake_case, __snake_case, indent=__snake_case )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 136 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = (CMStochasticIterativeScheduler,)
__snake_case = 10
def lowercase__ ( self : List[str] , **lowerCAmelCase_ : Dict ) -> Dict:
'''simple docstring'''
A__ : int ={
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**lowerCAmelCase_ )
return config
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Dict =10
A__ : str =self.get_scheduler_config()
A__ : Any =self.scheduler_classes[0](**lowerCAmelCase_ )
scheduler.set_timesteps(lowerCAmelCase_ )
A__ : List[Any] =scheduler.timesteps[0]
A__ : Union[str, Any] =scheduler.timesteps[1]
A__ : Optional[int] =self.dummy_sample
A__ : Union[str, Any] =0.1 * sample
A__ : Dict =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
A__ : int =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
A__ : Union[str, Any] =self.scheduler_classes[0]
A__ : Dict =self.get_scheduler_config()
A__ : Any =scheduler_class(**lowerCAmelCase_ )
A__ : int =1
scheduler.set_timesteps(lowerCAmelCase_ )
A__ : Any =scheduler.timesteps
A__ : Optional[int] =torch.manual_seed(0 )
A__ : List[Any] =self.dummy_model()
A__ : Optional[int] =self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowerCAmelCase_ ):
# 1. scale model input
A__ : Optional[Any] =scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict noise residual
A__ : Union[str, Any] =model(lowerCAmelCase_ , lowerCAmelCase_ )
# 3. predict previous sample x_t-1
A__ : Tuple =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
A__ : Dict =pred_prev_sample
A__ : Union[str, Any] =torch.sum(torch.abs(lowerCAmelCase_ ) )
A__ : Optional[Any] =torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def lowercase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
A__ : Dict =self.scheduler_classes[0]
A__ : Dict =self.get_scheduler_config()
A__ : Tuple =scheduler_class(**lowerCAmelCase_ )
A__ : Tuple =[1_06, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
A__ : List[Any] =scheduler.timesteps
A__ : Optional[Any] =torch.manual_seed(0 )
A__ : int =self.dummy_model()
A__ : int =self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
A__ : Any =scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict noise residual
A__ : List[str] =model(lowerCAmelCase_ , lowerCAmelCase_ )
# 3. predict previous sample x_t-1
A__ : Dict =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
A__ : Union[str, Any] =pred_prev_sample
A__ : Union[str, Any] =torch.sum(torch.abs(lowerCAmelCase_ ) )
A__ : List[Any] =torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
A__ : Optional[Any] =self.scheduler_classes[0]
A__ : Union[str, Any] =self.get_scheduler_config()
A__ : List[Any] =scheduler_class(**lowerCAmelCase_ )
A__ : Tuple =[39, 30, 12, 15, 0]
with self.assertRaises(lowerCAmelCase_ , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
A__ : Union[str, Any] =self.scheduler_classes[0]
A__ : List[str] =self.get_scheduler_config()
A__ : Tuple =scheduler_class(**lowerCAmelCase_ )
A__ : Dict =[39, 30, 12, 1, 0]
A__ : int =len(lowerCAmelCase_ )
with self.assertRaises(lowerCAmelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
def lowercase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[Any] =self.scheduler_classes[0]
A__ : Any =self.get_scheduler_config()
A__ : Optional[int] =scheduler_class(**lowerCAmelCase_ )
A__ : List[str] =[scheduler.config.num_train_timesteps]
with self.assertRaises(
lowerCAmelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
| 136 | 1 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
A__ : List[Any] = logging.getLogger(__name__)
def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any ) -> Tuple:
__lowerCamelCase : List[str] = np.argmax(UpperCAmelCase_ , axis=1 )
return np.sum(outputs == labels )
def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] ) -> Union[str, Any]:
with open(UpperCAmelCase_ , encoding='utf_8' ) as f:
__lowerCamelCase : Union[str, Any] = csv.reader(UpperCAmelCase_ )
__lowerCamelCase : Any = []
next(UpperCAmelCase_ ) # skip the first line
for line in tqdm(UpperCAmelCase_ ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] ) -> Optional[Any]:
__lowerCamelCase : Optional[Any] = []
for dataset in encoded_datasets:
__lowerCamelCase : Union[str, Any] = len(UpperCAmelCase_ )
__lowerCamelCase : Any = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
__lowerCamelCase : Optional[int] = np.zeros((n_batch, 2) , dtype=np.intaa )
__lowerCamelCase : List[Any] = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa )
__lowerCamelCase : Dict = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(UpperCAmelCase_ ):
__lowerCamelCase : List[Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__lowerCamelCase : Union[str, Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__lowerCamelCase : int = with_conta
__lowerCamelCase : Union[str, Any] = with_conta
__lowerCamelCase : Optional[Any] = len(UpperCAmelCase_ ) - 1
__lowerCamelCase : Optional[int] = len(UpperCAmelCase_ ) - 1
__lowerCamelCase : Union[str, Any] = with_conta
__lowerCamelCase : Tuple = with_conta
__lowerCamelCase : Optional[Any] = mc_label
__lowerCamelCase : int = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(UpperCAmelCase_ ) for t in all_inputs ) )
return tensor_datasets
def UpperCAmelCase__ ( ) -> List[Any]:
__lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=UpperCAmelCase_ , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=UpperCAmelCase_ , default='' )
parser.add_argument('--eval_dataset' , type=UpperCAmelCase_ , default='' )
parser.add_argument('--seed' , type=UpperCAmelCase_ , default=42 )
parser.add_argument('--num_train_epochs' , type=UpperCAmelCase_ , default=3 )
parser.add_argument('--train_batch_size' , type=UpperCAmelCase_ , default=8 )
parser.add_argument('--eval_batch_size' , type=UpperCAmelCase_ , default=16 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=UpperCAmelCase_ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=UpperCAmelCase_ , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=UpperCAmelCase_ , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=UpperCAmelCase_ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=UpperCAmelCase_ , default=6.25e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=UpperCAmelCase_ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=UpperCAmelCase_ , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=UpperCAmelCase_ , default=0.01 )
parser.add_argument('--lm_coef' , type=UpperCAmelCase_ , default=0.9 )
parser.add_argument('--n_valid' , type=UpperCAmelCase_ , default=3_74 )
parser.add_argument('--server_ip' , type=UpperCAmelCase_ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=UpperCAmelCase_ , default='' , help='Can be used for distant debugging.' )
__lowerCamelCase : Any = parser.parse_args()
print(UpperCAmelCase_ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCAmelCase_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__lowerCamelCase : Tuple = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
__lowerCamelCase : Tuple = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(UpperCAmelCase_ , UpperCAmelCase_ ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__lowerCamelCase : Union[str, Any] = ['_start_', '_delimiter_', '_classify_']
__lowerCamelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(UpperCAmelCase_ )
__lowerCamelCase : int = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
__lowerCamelCase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(UpperCAmelCase_ ) )
model.to(UpperCAmelCase_ )
# Load and encode the datasets
def tokenize_and_encode(UpperCAmelCase_ : str ):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(UpperCAmelCase_ ) )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return obj
return [tokenize_and_encode(UpperCAmelCase_ ) for o in obj]
logger.info('Encoding dataset...' )
__lowerCamelCase : Union[str, Any] = load_rocstories_dataset(args.train_dataset )
__lowerCamelCase : int = load_rocstories_dataset(args.eval_dataset )
__lowerCamelCase : Union[str, Any] = (train_dataset, eval_dataset)
__lowerCamelCase : int = tokenize_and_encode(UpperCAmelCase_ )
# Compute the max input length for the Transformer
__lowerCamelCase : Optional[int] = model.config.n_positions // 2 - 2
__lowerCamelCase : Union[str, Any] = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
__lowerCamelCase : Tuple = min(UpperCAmelCase_ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__lowerCamelCase : List[Any] = pre_process_datasets(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , *UpperCAmelCase_ )
__lowerCamelCase , __lowerCamelCase : Optional[Any] = tensor_datasets[0], tensor_datasets[1]
__lowerCamelCase : Optional[int] = TensorDataset(*UpperCAmelCase_ )
__lowerCamelCase : List[str] = RandomSampler(UpperCAmelCase_ )
__lowerCamelCase : str = DataLoader(UpperCAmelCase_ , sampler=UpperCAmelCase_ , batch_size=args.train_batch_size )
__lowerCamelCase : Union[str, Any] = TensorDataset(*UpperCAmelCase_ )
__lowerCamelCase : int = SequentialSampler(UpperCAmelCase_ )
__lowerCamelCase : List[str] = DataLoader(UpperCAmelCase_ , sampler=UpperCAmelCase_ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__lowerCamelCase : List[Any] = args.max_steps
__lowerCamelCase : Any = args.max_steps // (len(UpperCAmelCase_ ) // args.gradient_accumulation_steps) + 1
else:
__lowerCamelCase : Optional[Any] = len(UpperCAmelCase_ ) // args.gradient_accumulation_steps * args.num_train_epochs
__lowerCamelCase : Optional[Any] = list(model.named_parameters() )
__lowerCamelCase : Optional[int] = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
__lowerCamelCase : List[str] = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
__lowerCamelCase : Tuple = AdamW(UpperCAmelCase_ , lr=args.learning_rate , eps=args.adam_epsilon )
__lowerCamelCase : List[str] = get_linear_schedule_with_warmup(
UpperCAmelCase_ , num_warmup_steps=args.warmup_steps , num_training_steps=UpperCAmelCase_ )
if args.do_train:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
__lowerCamelCase : Tuple = 0
__lowerCamelCase : str = 0
__lowerCamelCase : Tuple = tqdm(UpperCAmelCase_ , desc='Training' )
for step, batch in enumerate(UpperCAmelCase_ ):
__lowerCamelCase : Dict = tuple(t.to(UpperCAmelCase_ ) for t in batch )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[str] = batch
__lowerCamelCase : Dict = model(UpperCAmelCase_ , mc_token_ids=UpperCAmelCase_ , lm_labels=UpperCAmelCase_ , mc_labels=UpperCAmelCase_ )
__lowerCamelCase : Union[str, Any] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__lowerCamelCase : Dict = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__lowerCamelCase : Tuple = 'Training loss: {:.2e} lr: {:.2e}'.format(UpperCAmelCase_ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__lowerCamelCase : Union[str, Any] = model.module if hasattr(UpperCAmelCase_ , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__lowerCamelCase : str = os.path.join(args.output_dir , UpperCAmelCase_ )
__lowerCamelCase : Union[str, Any] = os.path.join(args.output_dir , UpperCAmelCase_ )
torch.save(model_to_save.state_dict() , UpperCAmelCase_ )
model_to_save.config.to_json_file(UpperCAmelCase_ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__lowerCamelCase : Optional[int] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__lowerCamelCase : List[str] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(UpperCAmelCase_ )
if args.do_eval:
model.eval()
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = 0, 0
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = 0, 0
for batch in tqdm(UpperCAmelCase_ , desc='Evaluating' ):
__lowerCamelCase : List[Any] = tuple(t.to(UpperCAmelCase_ ) for t in batch )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Tuple = batch
with torch.no_grad():
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = model(
UpperCAmelCase_ , mc_token_ids=UpperCAmelCase_ , lm_labels=UpperCAmelCase_ , mc_labels=UpperCAmelCase_ )
__lowerCamelCase : Optional[Any] = mc_logits.detach().cpu().numpy()
__lowerCamelCase : List[str] = mc_labels.to('cpu' ).numpy()
__lowerCamelCase : str = accuracy(UpperCAmelCase_ , UpperCAmelCase_ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__lowerCamelCase : Any = eval_loss / nb_eval_steps
__lowerCamelCase : List[str] = eval_accuracy / nb_eval_examples
__lowerCamelCase : Any = tr_loss / nb_tr_steps if args.do_train else None
__lowerCamelCase : Union[str, Any] = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
__lowerCamelCase : List[Any] = os.path.join(args.output_dir , 'eval_results.txt' )
with open(UpperCAmelCase_ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , UpperCAmelCase_ , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 185 |
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : int = 0
__lowerCamelCase : Union[str, Any] = [0]
__lowerCamelCase : Any = [0]
__lowerCamelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertEqual(k.knapsack(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , 0 )
__lowerCamelCase : List[str] = [60]
__lowerCamelCase : Union[str, Any] = [10]
__lowerCamelCase : List[Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertEqual(k.knapsack(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , 0 )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : Optional[int] = 3
__lowerCamelCase : int = [1, 2, 3]
__lowerCamelCase : str = [3, 2, 1]
__lowerCamelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertEqual(k.knapsack(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , 5 )
def lowercase_ ( self ) -> int:
__lowerCamelCase : Optional[int] = 50
__lowerCamelCase : List[str] = [60, 1_00, 1_20]
__lowerCamelCase : List[str] = [10, 20, 30]
__lowerCamelCase : List[str] = len(SCREAMING_SNAKE_CASE_ )
self.assertEqual(k.knapsack(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , 2_20 )
if __name__ == "__main__":
unittest.main()
| 185 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
A__ : Any =ort.SessionOptions()
A__ : List[Any] =False
return options
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
A__ : int =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
A__ : List[str] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
A__ : List[str] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
A__ : Optional[int] =OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Optional[int] ="""A red cat sitting on a park bench"""
A__ : Dict =np.random.RandomState(0 )
A__ : int =pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=lowerCAmelCase_ , output_type="""np""" , )
A__ : List[str] =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 358 |
'''simple docstring'''
def __lowerCamelCase ( __snake_case : int ) -> bool:
"""simple docstring"""
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
A__ : Any =4
A__ : int =(1 << p) - 1
for _ in range(p - 2 ):
A__ : Dict =((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 136 | 0 |
from typing import List
from .keymap import KEYMAP, get_character
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
def decorator(UpperCamelCase_ ):
snake_case = getattr(lowercase__ ,'''handle_key''' ,[] )
handle += [key]
setattr(lowercase__ ,'''handle_key''' ,lowercase__ )
return func
return decorator
def UpperCAmelCase__ (*UpperCamelCase_ ):
"""simple docstring"""
def decorator(UpperCamelCase_ ):
snake_case = getattr(lowercase__ ,'''handle_key''' ,[] )
handle += keys
setattr(lowercase__ ,'''handle_key''' ,lowercase__ )
return func
return decorator
class A__ ( A__ ):
"""simple docstring"""
def __new__( cls , __snake_case , __snake_case , __snake_case ):
snake_case = super().__new__(cls , __A , __A , __A )
if not hasattr(__A , '''key_handler''' ):
setattr(__A , '''key_handler''' , {} )
setattr(__A , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
snake_case = getattr(__A , '''handle_key''' , [] )
for key in handled_keys:
snake_case = value
return new_cls
@staticmethod
def a_ ( cls ):
snake_case = get_character()
if char != KEYMAP["undefined"]:
snake_case = ord(__A )
snake_case = cls.key_handler.get(__A )
if handler:
snake_case = char
return handler(cls )
else:
return None
def UpperCAmelCase__ (cls ):
"""simple docstring"""
return KeyHandler(cls.__name__ ,cls.__bases__ ,cls.__dict__.copy() )
| 127 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def _snake_case ( ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = 1_0
lowerCAmelCase_ :Optional[int] = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
lowerCAmelCase_ :int = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
"""id""": list(range(lowercase__ ) ),
} , features=lowercase__ , )
return dataset
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple , lowercase__ : int ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=lowercase__ )
return filename
# FILE_CONTENT + files
__UpperCAmelCase = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
lowerCAmelCase_ :List[Any] = FILE_CONTENT
with open(lowercase__ , """w""" ) as f:
f.write(lowercase__ )
return filename
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[Any] ) -> Tuple:
'''simple docstring'''
import bza
lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" )
with bza.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[Any] ) -> Dict:
'''simple docstring'''
import gzip
lowerCAmelCase_ :int = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" )
with gzip.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict ) -> Optional[int]:
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
lowerCAmelCase_ :List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
lowerCAmelCase_ :int = bytes(lowercase__ , """utf-8""" )
with lza.frame.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict , lowercase__ : Optional[int] ) -> Any:
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(lowercase__ , """w""" ) as archive:
archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
import tarfile
lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(lowercase__ , """w""" ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> str:
'''simple docstring'''
import lzma
lowerCAmelCase_ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
lowerCAmelCase_ :Optional[Any] = bytes(lowercase__ , """utf-8""" )
with lzma.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Any:
'''simple docstring'''
import zipfile
lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int ) -> Tuple:
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
lowerCAmelCase_ :Any = bytes(lowercase__ , """utf-8""" )
with zstd.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] ) -> str:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
lowerCAmelCase_ :Any = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(lowercase__ , """w""" ) as f:
f.write(lowercase__ )
return filename
__UpperCAmelCase = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__UpperCAmelCase = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__UpperCAmelCase = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
__UpperCAmelCase = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__UpperCAmelCase = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="""session""" )
def _snake_case ( ) -> Union[str, Any]:
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Tuple = datasets.Dataset.from_dict(lowercase__ )
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con:
lowerCAmelCase_ :Union[str, Any] = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(lowercase__ , """w""" , newline="""""" ) as f:
lowerCAmelCase_ :Optional[int] = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(lowercase__ , """w""" , newline="""""" ) as f:
lowerCAmelCase_ :Dict = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str , lowercase__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
import bza
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(lowercase__ , """rb""" ) as f:
lowerCAmelCase_ :Union[str, Any] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Any ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : str ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
lowerCAmelCase_ :Optional[Any] = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(lowercase__ , """wb""" ) as f:
lowerCAmelCase_ :Optional[int] = pq.ParquetWriter(lowercase__ , schema=lowercase__ )
lowerCAmelCase_ :List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowerCAmelCase_ :Union[str, Any] = {"""data""": DATA}
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowerCAmelCase_ :Optional[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int , lowercase__ : Dict ) -> Optional[int]:
'''simple docstring'''
import gzip
lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowercase__ , """rb""" ) as orig_file:
with gzip.open(lowercase__ , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> Any:
'''simple docstring'''
import gzip
lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowercase__ , """rb""" ) as orig_file:
with gzip.open(lowercase__ , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : List[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[str] ) -> int:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : List[str] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowercase__ , """w""" ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict , lowercase__ : str , lowercase__ : List[str] , lowercase__ : int ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowercase__ , """w""" ) as f:
f.add(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :str = ["""0""", """1""", """2""", """3"""]
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(lowercase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :int = ["""0""", """1""", """2""", """3"""]
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(lowercase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Dict = ["""0""", """1""", """2""", """3"""]
lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(lowercase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : str , lowercase__ : int ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : List[str] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(lowercase__ , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( ) -> int:
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def _snake_case ( ) -> Tuple:
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : Tuple ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
return data_dir
| 84 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
lowercase =[
'good first issue',
'feature request',
'wip',
]
def lowerCamelCase__ ( ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] =Github(os.environ['GITHUB_TOKEN'] )
_UpperCAmelCase : Union[str, Any] =g.get_repo('huggingface/accelerate' )
_UpperCAmelCase : Any =repo.get_issues(state='open' )
for issue in open_issues:
_UpperCAmelCase : Union[str, Any] =sorted([comment for comment in issue.get_comments()] , key=lambda __lowerCamelCase : i.created_at , reverse=__lowerCamelCase )
_UpperCAmelCase : List[str] =comments[0] if len(__lowerCamelCase ) > 0 else None
_UpperCAmelCase : str =dt.utcnow()
_UpperCAmelCase : Tuple =(current_time - issue.updated_at).days
_UpperCAmelCase : str =(current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 2_3
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 353 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowercase =logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( lowerCAmelCase ,lowerCAmelCase ):
@register_to_config
def __init__( self , snake_case , snake_case = None , snake_case = None) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : List[Any] =learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
_UpperCAmelCase : str =torch.zeros(snake_case , snake_case)
else:
_UpperCAmelCase : Tuple =None
_UpperCAmelCase : int =torch.nn.Parameter(snake_case)
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase =42
UpperCAmelCase =42
UpperCAmelCase =42
UpperCAmelCase =42
UpperCAmelCase =42
UpperCAmelCase =42
def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> Tuple:
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=snake_case , transformer=snake_case , text_encoder=snake_case , tokenizer=snake_case , scheduler=snake_case , learned_classifier_free_sampling_embeddings=snake_case , )
def lowerCAmelCase ( self , snake_case , snake_case , snake_case) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : int =len(snake_case) if isinstance(snake_case , snake_case) else 1
# get prompt text embeddings
_UpperCAmelCase : Optional[Any] =self.tokenizer(
snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
_UpperCAmelCase : Union[str, Any] =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_UpperCAmelCase : str =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}")
_UpperCAmelCase : Union[str, Any] =text_input_ids[:, : self.tokenizer.model_max_length]
_UpperCAmelCase : Optional[int] =self.text_encoder(text_input_ids.to(self.device))[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
_UpperCAmelCase : List[str] =prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=snake_case)
# duplicate text embeddings for each generation per prompt
_UpperCAmelCase : Optional[Any] =prompt_embeds.repeat_interleave(snake_case , dim=0)
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
_UpperCAmelCase : Dict =self.learned_classifier_free_sampling_embeddings.embeddings
_UpperCAmelCase : Any =negative_prompt_embeds.unsqueeze(0).repeat(snake_case , 1 , 1)
else:
_UpperCAmelCase : str =[''] * batch_size
_UpperCAmelCase : Dict =text_input_ids.shape[-1]
_UpperCAmelCase : str =self.tokenizer(
snake_case , padding='max_length' , max_length=snake_case , truncation=snake_case , return_tensors='pt' , )
_UpperCAmelCase : str =self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# See comment for normalizing text embeddings
_UpperCAmelCase : Tuple =negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=snake_case)
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_UpperCAmelCase : int =negative_prompt_embeds.shape[1]
_UpperCAmelCase : List[str] =negative_prompt_embeds.repeat(1 , snake_case , 1)
_UpperCAmelCase : Optional[int] =negative_prompt_embeds.view(batch_size * num_images_per_prompt , snake_case , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCAmelCase : str =torch.cat([negative_prompt_embeds, prompt_embeds])
return prompt_embeds
@torch.no_grad()
def __call__( self , snake_case , snake_case = 1_0_0 , snake_case = 5.0 , snake_case = 1.0 , snake_case = 1 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(snake_case , snake_case):
_UpperCAmelCase : Tuple =1
elif isinstance(snake_case , snake_case):
_UpperCAmelCase : int =len(snake_case)
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case)}")
_UpperCAmelCase : Optional[Any] =batch_size * num_images_per_prompt
_UpperCAmelCase : Union[str, Any] =guidance_scale > 1.0
_UpperCAmelCase : Any =self._encode_prompt(snake_case , snake_case , snake_case)
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case , snake_case) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(snake_case)}.")
# get the initial completely masked latents unless the user supplied it
_UpperCAmelCase : List[Any] =(batch_size, self.transformer.num_latent_pixels)
if latents is None:
_UpperCAmelCase : Optional[Any] =self.transformer.num_vector_embeds - 1
_UpperCAmelCase : Optional[int] =torch.full(snake_case , snake_case).to(self.device)
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'
f" {self.transformer.num_vector_embeds - 1} (inclusive).")
_UpperCAmelCase : Optional[Any] =latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(snake_case , device=self.device)
_UpperCAmelCase : int =self.scheduler.timesteps.to(self.device)
_UpperCAmelCase : Dict =latents
for i, t in enumerate(self.progress_bar(snake_case)):
# expand the sample if we are doing classifier free guidance
_UpperCAmelCase : Union[str, Any] =torch.cat([sample] * 2) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
_UpperCAmelCase : Optional[Any] =self.transformer(snake_case , encoder_hidden_states=snake_case , timestep=snake_case).sample
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase : Dict =model_output.chunk(2)
_UpperCAmelCase : Dict =model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(snake_case , dim=1 , keepdim=snake_case)
_UpperCAmelCase : Any =self.truncate(snake_case , snake_case)
# remove `log(0)`'s (`-inf`s)
_UpperCAmelCase : int =model_output.clamp(-7_0)
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase : List[Any] =self.scheduler.step(snake_case , timestep=snake_case , sample=snake_case , generator=snake_case).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case , snake_case , snake_case)
_UpperCAmelCase : List[str] =self.vqvae.config.vq_embed_dim
_UpperCAmelCase : Optional[int] =(batch_size, self.transformer.height, self.transformer.width, embedding_channels)
_UpperCAmelCase : int =self.vqvae.quantize.get_codebook_entry(snake_case , shape=snake_case)
_UpperCAmelCase : str =self.vqvae.decode(snake_case , force_not_quantize=snake_case).sample
_UpperCAmelCase : str =(image / 2 + 0.5).clamp(0 , 1)
_UpperCAmelCase : Tuple =image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_UpperCAmelCase : Optional[int] =self.numpy_to_pil(snake_case)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case)
def lowerCAmelCase ( self , snake_case , snake_case) -> torch.FloatTensor:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Dict =torch.sort(snake_case , 1 , descending=snake_case)
_UpperCAmelCase : Dict =torch.exp(snake_case)
_UpperCAmelCase : str =sorted_p_x_0.cumsum(dim=1) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
_UpperCAmelCase : Optional[int] =torch.full_like(keep_mask[:, 0:1, :] , snake_case)
_UpperCAmelCase : Any =torch.cat((all_true, keep_mask) , dim=1)
_UpperCAmelCase : Dict =keep_mask[:, :-1, :]
_UpperCAmelCase : Any =keep_mask.gather(1 , indices.argsort(1))
_UpperCAmelCase : str =log_p_x_0.clone()
_UpperCAmelCase : Any =-torch.inf # -inf = log(0)
return rv
| 242 | 0 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( lowerCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : int = None
_lowerCAmelCase : Optional[Any] = BloomTokenizerFast
_lowerCAmelCase : Any = BloomTokenizerFast
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Dict = False
_lowerCAmelCase : List[Any] = """tokenizer_file"""
_lowerCAmelCase : List[Any] = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
def _snake_case ( self : Tuple ):
super().setUp()
snake_case_ : Dict = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self : List[str] , **lowercase_ : Optional[int] ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def _snake_case ( self : Dict ):
snake_case_ : Dict = self.get_rust_tokenizer()
snake_case_ : Optional[int] = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
snake_case_ : Optional[int] = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
snake_case_ : Union[str, Any] = tokenizer.batch_encode_plus(lowercase_ )['''input_ids''']
self.assertListEqual(lowercase_ , lowercase_ )
snake_case_ : Dict = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def _snake_case ( self : List[Any] , lowercase_ : Dict=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
snake_case_ : List[Any] = '''This is a simple input'''
snake_case_ : Optional[Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case_ : List[Any] = ('''This is a simple input''', '''This is a pair''')
snake_case_ : Dict = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(lowercase_ , max_length=lowercase_ )
tokenizer_r.encode_plus(lowercase_ , max_length=lowercase_ )
tokenizer_r.batch_encode_plus(lowercase_ , max_length=lowercase_ )
tokenizer_r.encode(lowercase_ , max_length=lowercase_ )
tokenizer_r.batch_encode_plus(lowercase_ , max_length=lowercase_ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
snake_case_ : Tuple = None # Hotfixing padding = None
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding='''max_length''' )
# Simple input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding='''max_length''' )
# Simple input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding='''max_length''' , )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding='''max_length''' )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding='''max_length''' )
# Pair input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding='''max_length''' , )
def _snake_case ( self : Any ):
snake_case_ : List[str] = self.get_rust_tokenizer()
snake_case_ : str = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=lowercase_ )
snake_case_ : int = next(iter(lowercase_ ) )['''premise'''] # pick up one data
snake_case_ : int = list(sample_data.values() )
snake_case_ : List[Any] = list(map(tokenizer.encode , lowercase_ ) )
snake_case_ : Dict = [tokenizer.decode(lowercase_ , clean_up_tokenization_spaces=lowercase_ ) for x in output_tokens]
self.assertListEqual(lowercase_ , lowercase_ )
def _snake_case ( self : Any ):
# The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positoonal embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 264 |
"""simple docstring"""
def __lowercase ( _a , _a , _a=False ):
if isinstance(_a , _a ) and isinstance(_a , _a ):
snake_case_ : Union[str, Any] = len(set_a.intersection(_a ) )
if alternative_union:
snake_case_ : Any = len(_a ) + len(_a )
else:
snake_case_ : str = len(set_a.union(_a ) )
return intersection / union
if isinstance(_a , (list, tuple) ) and isinstance(_a , (list, tuple) ):
snake_case_ : str = [element for element in set_a if element in set_b]
if alternative_union:
snake_case_ : Tuple = len(_a ) + len(_a )
return len(_a ) / union
else:
snake_case_ : List[Any] = set_a + [element for element in set_b if element not in set_a]
return len(_a ) / len(_a )
return len(_a ) / len(_a )
return None
if __name__ == "__main__":
lowercase__ : Any = {'''a''', '''b''', '''c''', '''d''', '''e'''}
lowercase__ : Optional[Any] = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
| 264 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 359 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
snake_case = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 319 | 0 |
import os
import sys
import unittest
lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCAmelCase = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCAmelCase = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class A ( unittest.TestCase ):
def _A (self ):
__lowercase= get_test_to_tester_mapping(_lowerCAmelCase )
__lowercase= get_test_to_tester_mapping(_lowerCAmelCase )
__lowercase= {'BertModelTest': 'BertModelTester'}
__lowercase= {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
def _A (self ):
__lowercase= get_model_to_test_mapping(_lowerCAmelCase )
__lowercase= get_model_to_test_mapping(_lowerCAmelCase )
__lowercase= {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
__lowercase= {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
def _A (self ):
__lowercase= get_model_to_tester_mapping(_lowerCAmelCase )
__lowercase= get_model_to_tester_mapping(_lowerCAmelCase )
__lowercase= {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
__lowercase= {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
| 295 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int=13 , _lowerCAmelCase : Any=32 , _lowerCAmelCase : Dict=2 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Dict=16 , _lowerCAmelCase : str=[1, 2, 1] , _lowerCAmelCase : List[Any]=[2, 2, 4] , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Any=2.0 , _lowerCAmelCase : Any=True , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : str=True , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : Dict=1e-5 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Dict=10 , _lowerCAmelCase : int=8 , ):
A = parent
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = embed_dim
A = depths
A = num_heads
A = window_size
A = mlp_ratio
A = qkv_bias
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = drop_path_rate
A = hidden_act
A = use_absolute_embeddings
A = patch_norm
A = layer_norm_eps
A = initializer_range
A = is_training
A = scope
A = use_labels
A = type_sequence_label_size
A = encoder_stride
def A (self : Dict ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = self.get_config()
return config, pixel_values, labels
def A (self : Optional[Any] ):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A (self : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] ):
A = SwinvaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(_lowerCAmelCase )
A = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
A = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A (self : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] ):
A = SwinvaForMaskedImageModeling(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A = 1
A = SwinvaForMaskedImageModeling(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A (self : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Any ):
A = self.type_sequence_label_size
A = SwinvaForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A (self : Union[str, Any] ):
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__lowerCAmelCase = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def A (self : Any ):
A = SwinvaModelTester(self )
A = ConfigTester(self , config_class=_lowerCAmelCase , embed_dim=37 )
def A (self : Dict ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A (self : int ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def A (self : Dict ):
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def A (self : Optional[int] ):
pass
def A (self : List[str] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def A (self : Optional[int] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowerCAmelCase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def A (self : int ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
for model_class in self.all_model_classes:
A = True
A = False
A = True
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
A = outputs.attentions
A = len(self.model_tester.depths )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A = True
A = config.window_size**2
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
A = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
A = len(_lowerCAmelCase )
# Check attention is always last and order is fine
A = True
A = True
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
A = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
A = 2
self.assertEqual(out_len + added_hidden_states , len(_lowerCAmelCase ) )
A = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def A (self : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] ):
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
A = outputs.hidden_states
A = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# Swinv2 has a different seq_length
A = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
A = outputs.reshaped_hidden_states
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
A , A , A , A = reshaped_hidden_states[0].shape
A = (
reshaped_hidden_states[0].view(_lowerCAmelCase , _lowerCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A (self : Tuple ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def A (self : List[str] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = 3
A = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
A = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
A = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
def A (self : Optional[int] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def A (self : Union[str, Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def A (self : Optional[Any] ):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = SwinvaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def A (self : Optional[Any] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
A = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A (self : List[str] ):
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def A (self : List[str] ):
A = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
_lowerCAmelCase )
A = self.default_image_processor
A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
A = model(**_lowerCAmelCase )
# verify the logits
A = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
A = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 258 | 0 |
__UpperCAmelCase = 'Tobias Carryer'
from time import time
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A , __A , __A=int(time() ) ) -> Union[str, Any]: # noqa: B008
lowerCAmelCase_ :Tuple = multiplier
lowerCAmelCase_ :Union[str, Any] = increment
lowerCAmelCase_ :Any = modulo
lowerCAmelCase_ :Optional[int] = seed
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Any = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__UpperCAmelCase = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31)
while True:
print(lcg.next_number())
| 358 |
"""simple docstring"""
from __future__ import annotations
__UpperCAmelCase = 1.6021e-19 # units = C
def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float , ) -> tuple[str, float]:
'''simple docstring'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""PerceiverFeatureExtractor"""]
lowercase_ = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 58 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
__lowerCAmelCase = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Any = AudioClassificationPipeline(model=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
# test with a raw waveform
__a : Optional[Any] = np.zeros((34000,) )
__a : Union[str, Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
__a , __a : Dict = examples
__a : Tuple = audio_classifier(_UpperCAmelCase )
# by default a model is initialized with num_labels=2
self.assertEqual(
_UpperCAmelCase , [
{'''score''': ANY(_UpperCAmelCase ), '''label''': ANY(_UpperCAmelCase )},
{'''score''': ANY(_UpperCAmelCase ), '''label''': ANY(_UpperCAmelCase )},
] , )
__a : List[Any] = audio_classifier(_UpperCAmelCase , top_k=1 )
self.assertEqual(
_UpperCAmelCase , [
{'''score''': ANY(_UpperCAmelCase ), '''label''': ANY(_UpperCAmelCase )},
] , )
self.run_torchaudio(_UpperCAmelCase )
@require_torchaudio
def _lowerCamelCase ( self , _UpperCAmelCase ):
import datasets
# test with a local file
__a : Tuple = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
__a : Union[str, Any] = dataset[0]['''audio''']['''array''']
__a : Tuple = audio_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{'''score''': ANY(_UpperCAmelCase ), '''label''': ANY(_UpperCAmelCase )},
{'''score''': ANY(_UpperCAmelCase ), '''label''': ANY(_UpperCAmelCase )},
] , )
@require_torch
def _lowerCamelCase ( self ):
__a : Optional[Any] = '''anton-l/wav2vec2-random-tiny-classifier'''
__a : Union[str, Any] = pipeline('''audio-classification''' , model=_UpperCAmelCase )
__a : Optional[int] = np.ones((8000,) )
__a : Optional[int] = audio_classifier(_UpperCAmelCase , top_k=4 )
__a : Tuple = [
{'''score''': 0.0_8_4_2, '''label''': '''no'''},
{'''score''': 0.0_8_3_8, '''label''': '''up'''},
{'''score''': 0.0_8_3_7, '''label''': '''go'''},
{'''score''': 0.0_8_3_4, '''label''': '''right'''},
]
__a : Dict = [
{'''score''': 0.0_8_4_5, '''label''': '''stop'''},
{'''score''': 0.0_8_4_4, '''label''': '''on'''},
{'''score''': 0.0_8_4_1, '''label''': '''right'''},
{'''score''': 0.0_8_3_4, '''label''': '''left'''},
]
self.assertIn(nested_simplify(_UpperCAmelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
__a : List[Any] = {'''array''': np.ones((8000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
__a : Optional[Any] = audio_classifier(_UpperCAmelCase , top_k=4 )
self.assertIn(nested_simplify(_UpperCAmelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def _lowerCamelCase ( self ):
import datasets
__a : Tuple = '''superb/wav2vec2-base-superb-ks'''
__a : Optional[int] = pipeline('''audio-classification''' , model=_UpperCAmelCase )
__a : int = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )
__a : Any = np.array(dataset[3]['''speech'''] , dtype=np.floataa )
__a : Tuple = audio_classifier(_UpperCAmelCase , top_k=4 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=3 ) , [
{'''score''': 0.9_8_1, '''label''': '''go'''},
{'''score''': 0.0_0_7, '''label''': '''up'''},
{'''score''': 0.0_0_6, '''label''': '''_unknown_'''},
{'''score''': 0.0_0_1, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def _lowerCamelCase ( self ):
pass | 160 | 0 |
from collections.abc import Callable
import numpy as np
def __UpperCamelCase ( _A , _A , _A , _A , _A ):
lowerCAmelCase_ = int(np.ceil((x_end - xa) / step_size ) )
lowerCAmelCase_ = np.zeros((n + 1,) )
lowerCAmelCase_ = ya
lowerCAmelCase_ = xa
for k in range(__lowerCamelCase ):
lowerCAmelCase_ = y[k] + step_size * ode_func(__lowerCamelCase , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class A ( __UpperCAmelCase ):
__snake_case = 'gpt_neox'
def __init__( self, UpperCamelCase__=5_0432, UpperCamelCase__=6144, UpperCamelCase__=44, UpperCamelCase__=64, UpperCamelCase__=2_4576, UpperCamelCase__="gelu", UpperCamelCase__=0.25, UpperCamelCase__=1_0000, UpperCamelCase__=0.0, UpperCamelCase__=0.0, UpperCamelCase__=0.1, UpperCamelCase__=2048, UpperCamelCase__=0.02, UpperCamelCase__=1E-5, UpperCamelCase__=True, UpperCamelCase__=0, UpperCamelCase__=2, UpperCamelCase__=False, UpperCamelCase__=True, UpperCamelCase__=None, **UpperCamelCase__, ):
"""simple docstring"""
super().__init__(bos_token_id=UpperCamelCase__, eos_token_id=UpperCamelCase__, **UpperCamelCase__ )
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = rotary_pct
lowerCAmelCase_ = rotary_emb_base
lowerCAmelCase_ = attention_dropout
lowerCAmelCase_ = hidden_dropout
lowerCAmelCase_ = classifier_dropout
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = use_cache
lowerCAmelCase_ = tie_word_embeddings
lowerCAmelCase_ = use_parallel_residual
lowerCAmelCase_ = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'''The hidden size is not divisble by the number of attention heads! Make sure to update them!''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling, UpperCamelCase__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
f"got {self.rope_scaling}" )
lowerCAmelCase_ = self.rope_scaling.get('''type''', UpperCamelCase__ )
lowerCAmelCase_ = self.rope_scaling.get('''factor''', UpperCamelCase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(UpperCamelCase__, UpperCamelCase__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 167 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=32 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=4 , _UpperCAmelCase=[0, 1, 2, 3] , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=[1, 384, 24, 24] , _UpperCAmelCase=True , _UpperCAmelCase=None , ):
lowercase__: List[Any] = parent
lowercase__: Any = batch_size
lowercase__: Union[str, Any] = image_size
lowercase__: str = patch_size
lowercase__: str = num_channels
lowercase__: List[Any] = is_training
lowercase__: Any = use_labels
lowercase__: Optional[int] = hidden_size
lowercase__: Any = num_hidden_layers
lowercase__: int = backbone_out_indices
lowercase__: Union[str, Any] = num_attention_heads
lowercase__: Optional[Any] = intermediate_size
lowercase__: str = hidden_act
lowercase__: Optional[int] = hidden_dropout_prob
lowercase__: Optional[Any] = attention_probs_dropout_prob
lowercase__: Any = initializer_range
lowercase__: List[str] = num_labels
lowercase__: Optional[Any] = backbone_featmap_shape
lowercase__: Dict = scope
lowercase__: List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
lowercase__: Tuple = (image_size // patch_size) ** 2
lowercase__: Optional[int] = num_patches + 1
def _snake_case ( self ):
lowercase__: List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__: List[str] = None
if self.use_labels:
lowercase__: Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase__: List[Any] = self.get_config()
return config, pixel_values, labels
def _snake_case ( self ):
lowercase__: Dict = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[int] = DPTModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowercase__: List[str] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[Any] = self.num_labels
lowercase__: Tuple = DPTForDepthEstimation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowercase__: Any = model(__UpperCAmelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[Any] = self.num_labels
lowercase__: Dict = DPTForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowercase__: Any = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _snake_case ( self ):
lowercase__: Any = self.prepare_config_and_inputs()
lowercase__, lowercase__, lowercase__: Tuple = config_and_inputs
lowercase__: Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase (__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
_UpperCAmelCase :Any = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCAmelCase :List[Any] = False
_UpperCAmelCase :Any = False
_UpperCAmelCase :List[Any] = False
def _snake_case ( self ):
lowercase__: int = DPTModelTester(self )
lowercase__: Any = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def _snake_case ( self ):
pass
def _snake_case ( self ):
lowercase__, lowercase__: Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__: Any = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__: Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def _snake_case ( self ):
lowercase__, lowercase__: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__: Union[str, Any] = model_class(__UpperCAmelCase )
lowercase__: List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__: Tuple = [*signature.parameters.keys()]
lowercase__: Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
def _snake_case ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowercase__, lowercase__: int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__: List[Any] = True
if model_class in get_values(__UpperCAmelCase ):
continue
lowercase__: Any = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
lowercase__: List[Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
lowercase__: Dict = model(**__UpperCAmelCase ).loss
loss.backward()
def _snake_case ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowercase__, lowercase__: str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__: Any = False
lowercase__: Tuple = True
if model_class in get_values(__UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
lowercase__: Tuple = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
lowercase__: Optional[Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
lowercase__: str = model(**__UpperCAmelCase ).loss
loss.backward()
def _snake_case ( self ):
lowercase__, lowercase__: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__: int = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
lowercase__: Any = model_class(config=__UpperCAmelCase )
# Skip the check for the backbone
lowercase__: Optional[int] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
lowercase__: Optional[Any] = [F"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _snake_case ( self ):
pass
@slow
def _snake_case ( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
lowercase__: Optional[int] = DPTModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def _snake_case ( self ):
lowercase__, lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__: Tuple = '''add'''
with self.assertRaises(__UpperCAmelCase ):
lowercase__: str = DPTForDepthEstimation(__UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
lowercase__: Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
lowercase__: Tuple = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
lowercase__: str = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(__UpperCAmelCase )
lowercase__: str = prepare_img()
lowercase__: Any = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase__: List[str] = model(**__UpperCAmelCase )
lowercase__: Optional[int] = outputs.predicted_depth
# verify the predicted depth
lowercase__: Dict = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __UpperCAmelCase )
lowercase__: Tuple = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCAmelCase , atol=1e-4 ) )
| 177 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : Dict = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
UpperCamelCase : Dict = {
"gpt2": 1_0_2_4,
"gpt2-medium": 1_0_2_4,
"gpt2-large": 1_0_2_4,
"gpt2-xl": 1_0_2_4,
"distilgpt2": 1_0_2_4,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["input_ids", "attention_mask"]
lowercase = GPTaTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = kwargs.pop('add_bos_token' , __UpperCAmelCase )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
__UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**__UpperCAmelCase )
__UpperCamelCase = add_prefix_space
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] )
if len(__UpperCAmelCase ) > self.model_max_length:
__UpperCamelCase = input_ids[-self.model_max_length :]
return input_ids
| 316 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__lowerCamelCase = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def UpperCamelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any=None , __lowerCamelCase : Dict=None , __lowerCamelCase : Any=None , __lowerCamelCase : str=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Union[str, Any]=None , ):
if attention_mask is None:
snake_case : Any = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
snake_case : Optional[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
snake_case : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case : int = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case : List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class UpperCAmelCase :
def __init__(self : int , snake_case__ : Any , snake_case__ : Optional[int]=13 , snake_case__ : Any=7 , snake_case__ : Dict=True , snake_case__ : Dict=False , snake_case__ : Tuple=99 , snake_case__ : List[str]=16 , snake_case__ : str=2 , snake_case__ : List[str]=4 , snake_case__ : List[str]=4 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Dict=0.1 , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=32 , snake_case__ : int=2 , snake_case__ : Union[str, Any]=1 , snake_case__ : Dict=0 , snake_case__ : List[str]=0.02 , ) -> Optional[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = parent
snake_case : Dict = batch_size
snake_case : Any = seq_length
snake_case : List[Any] = is_training
snake_case : Optional[int] = use_labels
snake_case : str = vocab_size
snake_case : Tuple = hidden_size
snake_case : Any = num_hidden_layers
snake_case : str = num_attention_heads
snake_case : int = intermediate_size
snake_case : Optional[int] = hidden_act
snake_case : Tuple = hidden_dropout_prob
snake_case : Optional[Any] = attention_probs_dropout_prob
snake_case : Tuple = max_position_embeddings
snake_case : List[str] = eos_token_id
snake_case : List[Any] = pad_token_id
snake_case : Union[str, Any] = bos_token_id
snake_case : List[str] = initializer_range
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
snake_case : List[str] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
snake_case : int = shift_tokens_right(snake_case__ , 1 , 2 )
snake_case : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=snake_case__ , )
snake_case : int = prepare_blenderbot_inputs_dict(snake_case__ , snake_case__ , snake_case__ )
return config, inputs_dict
def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[int]:
'''simple docstring'''
snake_case , snake_case : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = 20
snake_case : int = model_class_name(snake_case__ )
snake_case : Dict = model.encode(inputs_dict["input_ids"] )
snake_case , snake_case : List[str] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
snake_case : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , snake_case__ , snake_case__ )
snake_case : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
snake_case : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
snake_case : List[str] = model.decode(
decoder_input_ids[:, :-1] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=snake_case__ , decoder_position_ids=snake_case__ , )
snake_case : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
snake_case : int = model.decode(
decoder_input_ids[:, -1:] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=snake_case__ , )
snake_case : Union[str, Any] = model.decode(snake_case__ , snake_case__ )
snake_case : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : int ) -> Optional[Any]:
'''simple docstring'''
snake_case : Tuple = 20
snake_case : List[Any] = model_class_name(snake_case__ )
snake_case : Optional[Any] = model.encode(inputs_dict["input_ids"] )
snake_case , snake_case : Tuple = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
snake_case : List[str] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
snake_case : List[str] = model.init_cache(decoder_input_ids.shape[0] , snake_case__ , snake_case__ )
snake_case : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
snake_case : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=snake_case__ , decoder_position_ids=snake_case__ , )
snake_case : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
snake_case : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , snake_case__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=snake_case__ , decoder_position_ids=snake_case__ , )
snake_case : Any = model.decode(snake_case__ , snake_case__ , decoder_attention_mask=snake_case__ )
snake_case : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
A__ : List[str] = 99
def _SCREAMING_SNAKE_CASE (self : Any ) -> int:
'''simple docstring'''
snake_case : Tuple = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
snake_case : Union[str, Any] = input_ids.shape[0]
snake_case : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
snake_case , snake_case , snake_case : Optional[Any] = self._get_config_and_data()
snake_case : int = FlaxBlenderbotForConditionalGeneration(snake_case__ )
snake_case : Tuple = lm_model(input_ids=snake_case__ )
snake_case : List[str] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
snake_case : Any = FlaxBlenderbotForConditionalGeneration(snake_case__ )
snake_case : List[str] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
snake_case : Optional[Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
snake_case : List[str] = lm_model(input_ids=snake_case__ , decoder_input_ids=snake_case__ )
snake_case : Any = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Optional[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
snake_case : int = shift_tokens_right(snake_case__ , 1 , 2 )
snake_case : Tuple = np.equal(snake_case__ , 1 ).astype(np.floataa ).sum()
snake_case : Tuple = np.equal(snake_case__ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(snake_case__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class UpperCAmelCase ( A_ ,unittest.TestCase ,A_ ):
A__ : Optional[Any] = True
A__ : Union[str, Any] = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
A__ : List[str] = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[str] = FlaxBlenderbotModelTester(self )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> str:
'''simple docstring'''
snake_case , snake_case : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(snake_case__ , snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Any ) -> Tuple:
'''simple docstring'''
snake_case , snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(snake_case__ , snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
'''simple docstring'''
snake_case , snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ )
snake_case : Tuple = model_class(snake_case__ )
@jax.jit
def encode_jitted(snake_case__ : Tuple , snake_case__ : Optional[Any]=None , **snake_case__ : int ):
return model.encode(input_ids=snake_case__ , attention_mask=snake_case__ )
with self.subTest("JIT Enabled" ):
snake_case : Union[str, Any] = encode_jitted(**snake_case__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
snake_case : str = encode_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ , snake_case__ ):
self.assertEqual(jitted_output.shape , output.shape )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case : List[Any] = model_class(snake_case__ )
snake_case : Union[str, Any] = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
snake_case : Union[str, Any] = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(snake_case__ : Any , snake_case__ : Dict , snake_case__ : Dict ):
return model.decode(
decoder_input_ids=snake_case__ , decoder_attention_mask=snake_case__ , encoder_outputs=snake_case__ , )
with self.subTest("JIT Enabled" ):
snake_case : Optional[int] = decode_jitted(**snake_case__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
snake_case : List[Any] = decode_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ , snake_case__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Any:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case : int = model_class_name.from_pretrained("facebook/blenderbot-400M-distill" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
snake_case : Optional[int] = np.ones((1, 1) ) * model.config.eos_token_id
snake_case : Optional[Any] = model(snake_case__ )
self.assertIsNotNone(snake_case__ )
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU." )
@slow
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> str:
'''simple docstring'''
snake_case : Dict = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
snake_case : int = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
snake_case : Tuple = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=snake_case__ )
snake_case : Union[str, Any] = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B" )
snake_case : Tuple = ["Sam"]
snake_case : Dict = tokenizer(snake_case__ , return_tensors="jax" )
snake_case : Optional[Any] = model.generate(**snake_case__ , **snake_case__ )
snake_case : Any = "Sam is a great name. It means \"sun\" in Gaelic."
snake_case : Union[str, Any] = tokenizer.batch_decode(snake_case__ , **snake_case__ )
assert generated_txt[0].strip() == tgt_text
| 10 |
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : Union[str, Any] = 0
# if input_string is "aba" than new_input_string become "a|b|a"
snake_case : Tuple = ""
snake_case : Optional[int] = ""
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__lowerCamelCase ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
snake_case , snake_case : Tuple = 0, 0
# length[i] shows the length of palindromic substring with center i
snake_case : Any = [1 for i in range(len(__lowerCamelCase ) )]
# for each character in new_string find corresponding palindromic string
snake_case : int = 0
for j in range(len(__lowerCamelCase ) ):
snake_case : Optional[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__lowerCamelCase )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
snake_case : str = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
snake_case : List[str] = j - k + 1 # noqa: E741
snake_case : Dict = j + k - 1
# update max_length and start position
if max_length < length[j]:
snake_case : Optional[Any] = length[j]
snake_case : int = j
# create that string
snake_case : Any = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 1 |
def UpperCamelCase__ ( A__ ) -> tuple[int, int]:
try:
snake_case__ : List[str] = float(A__ )
except ValueError:
raise ValueError('Please enter a valid number' )
snake_case__ : str = decimal - int(A__ )
if fractional_part == 0:
return int(A__ ), 1
else:
snake_case__ : Optional[Any] = len(str(A__ ).split('.' )[1] )
snake_case__ : int = int(decimal * (10**number_of_frac_digits) )
snake_case__ : int = 10**number_of_frac_digits
snake_case__ , snake_case__ : List[str] = denominator, numerator
while True:
snake_case__ : List[Any] = dividend % divisor
if remainder == 0:
break
snake_case__ , snake_case__ : List[str] = divisor, remainder
snake_case__ , snake_case__ : Optional[int] = numerator / divisor, denominator / divisor
return int(A__ ), int(A__ )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction('67') = }''')
print(F'''{decimal_to_fraction('45.0') = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction('6.25') = }''')
print(F'''{decimal_to_fraction('78td') = }''')
| 143 | from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __snake_case :
__lowerCamelCase = XGLMConfig
__lowerCamelCase = {}
__lowerCamelCase = """gelu"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=14 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=512 , __UpperCamelCase=0.0_2 , ) -> str:
'''simple docstring'''
snake_case__ : Any = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : List[str] = seq_length
snake_case__ : Optional[Any] = is_training
snake_case__ : Optional[int] = use_input_mask
snake_case__ : Any = use_labels
snake_case__ : List[str] = vocab_size
snake_case__ : List[Any] = d_model
snake_case__ : List[str] = num_hidden_layers
snake_case__ : Optional[int] = num_attention_heads
snake_case__ : str = ffn_dim
snake_case__ : Optional[Any] = activation_function
snake_case__ : str = activation_dropout
snake_case__ : int = attention_dropout
snake_case__ : List[str] = max_position_embeddings
snake_case__ : Optional[int] = initializer_range
snake_case__ : List[str] = None
snake_case__ : List[str] = 0
snake_case__ : Optional[int] = 2
snake_case__ : Union[str, Any] = 1
def __a ( self ) -> List[str]:
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[str] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
snake_case__ : int = None
if self.use_input_mask:
snake_case__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : List[Any] = self.get_config()
snake_case__ : Optional[int] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __a ( self ) -> Any:
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCamelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCamelCase , )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Any = self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) : Tuple = config_and_inputs
snake_case__ : Tuple = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else ()
__lowerCamelCase = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : Union[str, Any] = TFXGLMModelTester(self )
snake_case__ : Optional[int] = ConfigTester(self , config_class=__UpperCamelCase , n_embd=37 )
def __a ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Any = TFXGLMModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def __a ( self ) -> Any:
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __snake_case ( unittest.TestCase ):
@slow
def __a ( self , __UpperCamelCase=True ) -> int:
'''simple docstring'''
snake_case__ : Optional[Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
snake_case__ : Tuple = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
snake_case__ : List[str] = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
snake_case__ : int = model.generate(__UpperCamelCase , do_sample=__UpperCamelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCamelCase )
@slow
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Any = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
snake_case__ : Dict = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
snake_case__ : Any = tokenizer('Today is a nice day and' , return_tensors='tf' )
snake_case__ : Dict = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
snake_case__ : Optional[int] = model.generate(__UpperCamelCase , do_sample=__UpperCamelCase , seed=[7, 0] )
snake_case__ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCamelCase )
snake_case__ : str = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : str = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
snake_case__ : Optional[int] = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
snake_case__ : Any = 'left'
# use different length sentences to test batching
snake_case__ : int = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
snake_case__ : Any = tokenizer(__UpperCamelCase , return_tensors='tf' , padding=__UpperCamelCase )
snake_case__ : List[Any] = inputs['input_ids']
snake_case__ : List[str] = model.generate(input_ids=__UpperCamelCase , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
snake_case__ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
snake_case__ : str = model.generate(input_ids=__UpperCamelCase , max_new_tokens=12 )
snake_case__ : int = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
snake_case__ : Dict = model.generate(input_ids=__UpperCamelCase , max_new_tokens=12 )
snake_case__ : List[Any] = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
snake_case__ : Optional[int] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCamelCase )
snake_case__ : Dict = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCamelCase )
snake_case__ : Union[str, Any] = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(__UpperCamelCase , [non_padded_sentence, padded_sentence] )
| 143 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class _snake_case ( A__ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
_lowercase : str = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowercase : ClassVar[Features] = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
_lowercase : ClassVar[Features] = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
_lowercase : str = "question"
_lowercase : str = "context"
_lowercase : str = "answers"
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 365 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
a_ : Tuple = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
a_ : List[Any] = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
a_ : List[str] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def SCREAMING_SNAKE_CASE__ ( self , a , a , a=None , a=True , a=False) -> Optional[Any]:
if rouge_types is None:
SCREAMING_SNAKE_CASE = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
SCREAMING_SNAKE_CASE = rouge_scorer.RougeScorer(rouge_types=a , use_stemmer=a)
if use_aggregator:
SCREAMING_SNAKE_CASE = scoring.BootstrapAggregator()
else:
SCREAMING_SNAKE_CASE = []
for ref, pred in zip(a , a):
SCREAMING_SNAKE_CASE = scorer.score(a , a)
if use_aggregator:
aggregator.add_scores(a)
else:
scores.append(a)
if use_aggregator:
SCREAMING_SNAKE_CASE = aggregator.aggregate()
else:
SCREAMING_SNAKE_CASE = {}
for key in scores[0]:
SCREAMING_SNAKE_CASE = [score[key] for score in scores]
return result
| 327 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Union[str, Any] = '''ZinengTang/tvlt-base'''
_UpperCAmelCase : Dict = tempfile.mkdtemp()
def __lowerCAmelCase ( self , **A ) -> int:
return TvltImageProcessor.from_pretrained(self.checkpoint , **A )
def __lowerCAmelCase ( self , **A ) -> Optional[Any]:
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : str = self.get_image_processor()
_UpperCAmelCase : Optional[Any] = self.get_feature_extractor()
_UpperCAmelCase : Tuple = TvltProcessor(image_processor=A , feature_extractor=A )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : int = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , A )
self.assertIsInstance(processor.image_processor , A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : int = self.get_feature_extractor()
_UpperCAmelCase : List[str] = TvltProcessor(image_processor=A , feature_extractor=A )
_UpperCAmelCase : Tuple = np.ones([1_2_0_0_0] )
_UpperCAmelCase : Tuple = feature_extractor(A , return_tensors='''np''' )
_UpperCAmelCase : List[str] = processor(audio=A , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : Union[str, Any] = self.get_image_processor()
_UpperCAmelCase : Dict = self.get_feature_extractor()
_UpperCAmelCase : Dict = TvltProcessor(image_processor=A , feature_extractor=A )
_UpperCAmelCase : Optional[int] = np.ones([3, 2_2_4, 2_2_4] )
_UpperCAmelCase : Tuple = image_processor(A , return_tensors='''np''' )
_UpperCAmelCase : List[Any] = processor(images=A , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Optional[int] = self.get_image_processor()
_UpperCAmelCase : Union[str, Any] = self.get_feature_extractor()
_UpperCAmelCase : Optional[int] = TvltProcessor(image_processor=A , feature_extractor=A )
_UpperCAmelCase : List[Any] = np.ones([1_2_0_0_0] )
_UpperCAmelCase : Tuple = np.ones([3, 2_2_4, 2_2_4] )
_UpperCAmelCase : int = processor(audio=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : Any = self.get_image_processor()
_UpperCAmelCase : int = self.get_feature_extractor()
_UpperCAmelCase : List[str] = TvltProcessor(image_processor=A , feature_extractor=A )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 263 |
"""simple docstring"""
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int ):
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
_UpperCAmelCase : List[str] = str(bin(UpperCamelCase__ ) )[2:] # remove the leading "0b"
_UpperCAmelCase : str = str(bin(UpperCamelCase__ ) )[2:]
_UpperCAmelCase : List[str] = max(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(UpperCamelCase__ ) , b_binary.zfill(UpperCamelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 | 1 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_A = pytest.mark.integration
@require_faiss
class _lowercase ( __UpperCAmelCase ):
def _UpperCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase : Any = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(UpperCAmelCase_ ) for x in np.arange(30 ).tolist()]} )
return dset
def _UpperCamelCase ( self ) -> List[Any]:
import faiss
lowerCamelCase : Dataset = self._create_dummy_dataset()
lowerCamelCase : Optional[int] = dset.map(
lambda UpperCAmelCase_ , UpperCAmelCase_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ )
lowerCamelCase : Dict = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase , lowerCamelCase : List[str] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def _UpperCamelCase ( self ) -> Tuple:
import faiss
lowerCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase , lowerCamelCase : str = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def _UpperCamelCase ( self ) -> int:
import faiss
lowerCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCAmelCase_ ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase , lowerCamelCase : List[str] = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def _UpperCamelCase ( self ) -> Any:
lowerCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(UpperCAmelCase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def _UpperCamelCase ( self ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
lowerCamelCase : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase : Tuple = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCamelCase : int = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
lowerCamelCase : Optional[Any] = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=UpperCAmelCase_ )
lowerCamelCase , lowerCamelCase : str = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class _lowercase ( __UpperCAmelCase ):
def _UpperCamelCase ( self ) -> Union[str, Any]:
import faiss
lowerCamelCase : str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase : Optional[int] = np.zeros(5 , dtype=np.floataa )
lowerCamelCase : List[str] = 1
lowerCamelCase , lowerCamelCase : int = index.search(UpperCAmelCase_ )
self.assertRaises(UpperCAmelCase_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase : Tuple = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase , lowerCamelCase : List[str] = index.search_batch(UpperCAmelCase_ )
self.assertRaises(UpperCAmelCase_ , index.search_batch , queries[0] )
lowerCamelCase : List[str] = [scores[0] for scores in total_scores]
lowerCamelCase : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> Dict:
import faiss
lowerCamelCase : List[Any] = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase : int = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(UpperCAmelCase_ ):
lowerCamelCase : str = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def _UpperCamelCase ( self ) -> Any:
import faiss
lowerCamelCase : Any = faiss.IndexFlat(5 )
lowerCamelCase : Any = FaissIndex(custom_index=UpperCAmelCase_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _UpperCamelCase ( self ) -> Any:
import faiss
lowerCamelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCAmelCase_ ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase : List[str] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase : Dict = np.zeros(5 , dtype=np.floataa )
lowerCamelCase : Optional[Any] = 1
lowerCamelCase , lowerCamelCase : str = index.search(UpperCAmelCase_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def UpperCAmelCase ( a_ ):
'''simple docstring'''
import faiss
lowerCamelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
lowerCamelCase : Union[str, Any] = 'index.faiss'
lowerCamelCase : List[Any] = F"""mock://{index_name}"""
index.save(a_, storage_options=mockfs.storage_options )
lowerCamelCase : Optional[int] = FaissIndex.load(a_, storage_options=mockfs.storage_options )
lowerCamelCase : str = np.zeros(5, dtype=np.floataa )
lowerCamelCase : str = 1
lowerCamelCase , lowerCamelCase : int = index.search(a_ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _lowercase ( __UpperCAmelCase ):
def _UpperCamelCase ( self ) -> int:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase : Union[str, Any] = Elasticsearch()
lowerCamelCase : Optional[Any] = {'acknowledged': True}
lowerCamelCase : str = ElasticSearchIndex(es_client=UpperCAmelCase_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
lowerCamelCase : Tuple = 'foo'
lowerCamelCase : List[str] = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase , lowerCamelCase : Any = index.search(UpperCAmelCase_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase : Dict = 'foo'
lowerCamelCase : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase , lowerCamelCase : Optional[Any] = index.search(UpperCAmelCase_ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase : str = ['foo', 'bar', 'foobar']
lowerCamelCase : Union[str, Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase , lowerCamelCase : Optional[int] = index.search_batch(UpperCAmelCase_ )
lowerCamelCase : Dict = [scores[0] for scores in total_scores]
lowerCamelCase : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase_ )
# batched queries with timeout
lowerCamelCase : List[str] = ['foo', 'bar', 'foobar']
lowerCamelCase : str = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase , lowerCamelCase : Dict = index.search_batch(UpperCAmelCase_ , request_timeout=30 )
lowerCamelCase : Dict = [scores[0] for scores in total_scores]
lowerCamelCase : int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase_ )
| 205 |
"""simple docstring"""
import numpy as np
def UpperCAmelCase ( a_, a_, a_ = 1E-12, a_ = 100, ):
'''simple docstring'''
assert np.shape(a_ )[0] == np.shape(a_ )[1]
# Ensure proper dimensionality.
assert np.shape(a_ )[0] == np.shape(a_ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(a_ ) == np.iscomplexobj(a_ )
lowerCamelCase : Optional[int] = np.iscomplexobj(a_ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(a_, input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : List[str] = 0
lowerCamelCase : Any = 0
lowerCamelCase : Dict = 1E12
while not convergence:
# Multiple matrix by the vector.
lowerCamelCase : Optional[int] = np.dot(a_, a_ )
# Normalize the resulting output vector.
lowerCamelCase : Optional[int] = w / np.linalg.norm(a_ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
lowerCamelCase : Optional[Any] = vector.conj().T if is_complex else vector.T
lowerCamelCase : str = np.dot(a_, np.dot(a_, a_ ) )
# Check convergence.
lowerCamelCase : Optional[int] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
lowerCamelCase : int = True
lowerCamelCase : Optional[Any] = lambda_
if is_complex:
lowerCamelCase : Any = np.real(lambda_ )
return lambda_, vector
def UpperCAmelCase ( ):
'''simple docstring'''
lowerCamelCase : str = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
lowerCamelCase : str = np.array([41, 4, 20] )
lowerCamelCase : Optional[Any] = real_input_matrix.astype(np.complexaaa )
lowerCamelCase : Dict = np.triu(1j * complex_input_matrix, 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
lowerCamelCase : List[Any] = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
lowerCamelCase : str = real_input_matrix
lowerCamelCase : Any = real_vector
elif problem_type == "complex":
lowerCamelCase : str = complex_input_matrix
lowerCamelCase : Dict = complex_vector
# Our implementation.
lowerCamelCase , lowerCamelCase : List[str] = power_iteration(a_, a_ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
lowerCamelCase , lowerCamelCase : Optional[Any] = np.linalg.eigh(a_ )
# Last eigenvalue is the maximum one.
lowerCamelCase : Dict = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
lowerCamelCase : List[str] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(a_ ) - np.abs(a_ ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 205 | 1 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Tuple =logging.get_logger(__name__)
_lowercase : int ={
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
_lowercase : Union[str, Any] ={
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
_lowercase : int ={
"ctrl": 256,
}
_lowercase : str ={
"Pregnancy": 16_8629,
"Christianity": 7675,
"Explain": 10_6423,
"Fitness": 6_3440,
"Saving": 6_3163,
"Ask": 2_7171,
"Ass": 9_5985,
"Joke": 16_3509,
"Questions": 4_5622,
"Thoughts": 4_9605,
"Retail": 5_2342,
"Feminism": 16_4338,
"Writing": 1_1992,
"Atheism": 19_2263,
"Netflix": 4_8616,
"Computing": 3_9639,
"Opinion": 4_3213,
"Alone": 4_4967,
"Funny": 5_8917,
"Gaming": 4_0358,
"Human": 4088,
"India": 1331,
"Joker": 7_7138,
"Diet": 3_6206,
"Legal": 1_1859,
"Norman": 4939,
"Tip": 7_2689,
"Weight": 5_2343,
"Movies": 4_6273,
"Running": 2_3425,
"Science": 2090,
"Horror": 3_7793,
"Confession": 6_0572,
"Finance": 1_2250,
"Politics": 1_6360,
"Scary": 19_1985,
"Support": 1_2654,
"Technologies": 3_2516,
"Teenage": 6_6160,
"Event": 3_2769,
"Learned": 6_7460,
"Notion": 18_2770,
"Wikipedia": 3_7583,
"Books": 6665,
"Extract": 7_6050,
"Confessions": 10_2701,
"Conspiracy": 7_5932,
"Links": 6_3674,
"Narcissus": 15_0425,
"Relationship": 5_4766,
"Relationships": 13_4796,
"Reviews": 4_1671,
"News": 4256,
"Translation": 2_6820,
"multilingual": 12_8406,
}
def lowerCAmelCase_ ( _lowercase : List[str]) -> Tuple:
"""simple docstring"""
a__ : Union[str, Any] = set()
a__ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
a__ : Union[str, Any] = char
a__ : List[Any] = set(_lowercase)
return pairs
class snake_case__ (A__ ):
"""simple docstring"""
__lowerCAmelCase :str = VOCAB_FILES_NAMES
__lowerCAmelCase :Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase :List[Any] = CONTROL_CODES
def __init__( self , __lowercase , __lowercase , __lowercase="<unk>" , **__lowercase ) -> Any:
"""simple docstring"""
super().__init__(unk_token=__lowercase , **__lowercase )
with open(__lowercase , encoding="""utf-8""" ) as vocab_handle:
a__ : List[Any] = json.load(__lowercase )
a__ : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(__lowercase , encoding="""utf-8""" ) as merges_handle:
a__ : Optional[Any] = merges_handle.read().split("""\n""" )[1:-1]
a__ : Tuple = [tuple(merge.split() ) for merge in merges]
a__ : str = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
a__ : List[Any] = {}
@property
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
return len(self.encoder )
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Any:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
a__ : str = tuple(__lowercase )
a__ : Union[str, Any] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
a__ : List[str] = get_pairs(__lowercase )
if not pairs:
return token
while True:
a__ : Tuple = min(__lowercase , key=lambda __lowercase : self.bpe_ranks.get(__lowercase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
a__ , a__ : int = bigram
a__ : str = []
a__ : str = 0
while i < len(__lowercase ):
try:
a__ : Optional[Any] = word.index(__lowercase , __lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
a__ : Optional[Any] = j
if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a__ : Tuple = tuple(__lowercase )
a__ : Any = new_word
if len(__lowercase ) == 1:
break
else:
a__ : Optional[int] = get_pairs(__lowercase )
a__ : Optional[Any] = """@@ """.join(__lowercase )
a__ : Optional[Any] = word[:-4]
a__ : List[Any] = word
return word
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Union[str, Any]:
"""simple docstring"""
a__ : Dict = []
a__ : Optional[Any] = re.findall(r"""\S+\n?""" , __lowercase )
for token in words:
split_tokens.extend(list(self.bpe(__lowercase ).split(""" """ ) ) )
return split_tokens
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Optional[int]:
"""simple docstring"""
return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> int:
"""simple docstring"""
return self.decoder.get(__lowercase , self.unk_token )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> List[Any]:
"""simple docstring"""
a__ : str = """ """.join(__lowercase ).replace("""@@ """ , """""" ).strip()
return out_string
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__lowercase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a__ : Optional[int] = os.path.join(
__lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
a__ : List[Any] = os.path.join(
__lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowercase , ensure_ascii=__lowercase ) + """\n""" )
a__ : int = 0
with open(__lowercase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowercase : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
a__ : List[Any] = token_index
writer.write(""" """.join(__lowercase ) + """\n""" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 170 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : int =logging.get_logger(__name__)
_lowercase : Union[str, Any] ={
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_lowercase : Dict ={
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
_lowercase : Any ={"facebook/blenderbot_small-90M": 512}
def lowerCAmelCase_ ( _lowercase : Any) -> Optional[Any]:
"""simple docstring"""
a__ : List[str] = set()
a__ : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
a__ : Optional[Any] = char
a__ : Tuple = set(_lowercase)
return pairs
class snake_case__ (A__ ):
"""simple docstring"""
__lowerCAmelCase :List[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase :Any = ["input_ids", "attention_mask"]
def __init__( self , __lowercase , __lowercase , __lowercase="__start__" , __lowercase="__end__" , __lowercase="__unk__" , __lowercase="__null__" , **__lowercase , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , **__lowercase )
with open(__lowercase , encoding="""utf-8""" ) as vocab_handle:
a__ : Optional[int] = json.load(__lowercase )
a__ : str = {v: k for k, v in self.encoder.items()}
with open(__lowercase , encoding="""utf-8""" ) as merges_handle:
a__ : Any = merges_handle.read().split("""\n""" )[1:-1]
a__ : Optional[Any] = [tuple(merge.split() ) for merge in merges]
a__ : Dict = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
a__ : Dict = {}
@property
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
return len(self.encoder )
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
a__ : Any = re.sub("""([.,!?()])""" , r""" \1""" , __lowercase )
a__ : int = re.sub("""(')""" , r""" \1 """ , __lowercase )
a__ : Tuple = re.sub(r"""\s{2,}""" , """ """ , __lowercase )
if "\n" in token:
a__ : Union[str, Any] = token.replace("""\n""" , """ __newln__""" )
a__ : Optional[int] = token.split(""" """ )
a__ : Union[str, Any] = []
for token in tokens:
if not len(__lowercase ):
continue
a__ : Union[str, Any] = token.lower()
a__ : List[Any] = tuple(__lowercase )
a__ : Optional[int] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
a__ : Any = get_pairs(__lowercase )
if not pairs:
words.append(__lowercase )
continue
while True:
a__ : Optional[int] = min(__lowercase , key=lambda __lowercase : self.bpe_ranks.get(__lowercase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
a__ , a__ : str = bigram
a__ : str = []
a__ : Optional[Any] = 0
while i < len(__lowercase ):
try:
a__ : Tuple = word.index(__lowercase , __lowercase )
new_word.extend(word[i:j] )
a__ : Optional[Any] = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a__ : List[Any] = tuple(__lowercase )
a__ : Any = new_word
if len(__lowercase ) == 1:
break
else:
a__ : Optional[int] = get_pairs(__lowercase )
a__ : List[Any] = """@@ """.join(__lowercase )
a__ : Optional[Any] = word[:-4]
a__ : Any = word
words.append(__lowercase )
return " ".join(__lowercase )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> List[str]:
"""simple docstring"""
a__ : Dict = []
a__ : Optional[Any] = re.findall(r"""\S+\n?""" , __lowercase )
for token in words:
split_tokens.extend(list(self.bpe(__lowercase ).split(""" """ ) ) )
return split_tokens
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> int:
"""simple docstring"""
a__ : Tuple = token.lower()
return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> str:
"""simple docstring"""
return self.decoder.get(__lowercase , self.unk_token )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> str:
"""simple docstring"""
a__ : int = """ """.join(__lowercase ).replace("""@@ """ , """""" ).strip()
return out_string
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__lowercase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a__ : Dict = os.path.join(
__lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
a__ : List[Any] = os.path.join(
__lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowercase , ensure_ascii=__lowercase ) + """\n""" )
a__ : List[str] = 0
with open(__lowercase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowercase : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
a__ : Optional[int] = token_index
writer.write(""" """.join(__lowercase ) + """\n""" )
index += 1
return vocab_file, merge_file
| 170 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A : List[Any] = logging.get_logger(__name__)
A : Optional[Any] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class __A( a ):
snake_case_ = '''table-transformer'''
snake_case_ = ['''past_key_values''']
snake_case_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _snake_case=True , _snake_case=None , _snake_case=3 , _snake_case=100 , _snake_case=6 , _snake_case=2_048 , _snake_case=8 , _snake_case=6 , _snake_case=2_048 , _snake_case=8 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=True , _snake_case="relu" , _snake_case=256 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1.0 , _snake_case=False , _snake_case="sine" , _snake_case="resnet50" , _snake_case=True , _snake_case=False , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=1 , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=0.1 , **_snake_case , ) -> int:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__a = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_snake_case , _snake_case ):
__a = backbone_config.get('''model_type''' )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(_snake_case )
# set timm attributes to None
__a , __a , __a = None, None, None
__a = use_timm_backbone
__a = backbone_config
__a = num_channels
__a = num_queries
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = init_xavier_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = encoder_layers
__a = auxiliary_loss
__a = position_embedding_type
__a = backbone
__a = use_pretrained_backbone
__a = dilation
# Hungarian matcher
__a = class_cost
__a = bbox_cost
__a = giou_cost
# Loss coefficients
__a = mask_loss_coefficient
__a = dice_loss_coefficient
__a = bbox_loss_coefficient
__a = giou_loss_coefficient
__a = eos_coefficient
super().__init__(is_encoder_decoder=_snake_case , **_snake_case )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return self.d_model
class __A( a ):
snake_case_ = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return 12 | 365 |
import functools
def __lowerCAmelCase ( a__ , a__ ) -> int:
__a = len(a__ )
__a = len(a__ )
@functools.cache
def min_distance(a__ , a__ ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
__a = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , a__ ) , 1 + min_distance(a__ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 33 | 0 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def lowercase ( _snake_case : int="ro" , _snake_case : Dict="en" , _snake_case : int="wmt16" , _snake_case : List[str]=None ) ->None:
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
__snake_case : Union[str, Any] = f"""{src_lang}-{tgt_lang}"""
print(f"""Converting {dataset}-{pair}""" )
__snake_case : Optional[Any] = datasets.load_dataset(_snake_case , _snake_case )
if save_dir is None:
__snake_case : int = f"""{dataset}-{pair}"""
__snake_case : Union[str, Any] = Path(_snake_case )
save_dir.mkdir(exist_ok=_snake_case )
for split in ds.keys():
print(f"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
__snake_case : Union[str, Any] = '''val''' if split == '''validation''' else split
__snake_case : List[str] = save_dir.joinpath(f"""{fn}.source""" )
__snake_case : int = save_dir.joinpath(f"""{fn}.target""" )
__snake_case : Union[str, Any] = src_path.open('''w+''' )
__snake_case : Union[str, Any] = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__snake_case : List[str] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 102 |
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = [1]
for i in range(2 ,UpperCamelCase_ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
snake_case = []
snake_case = list(range(UpperCamelCase_ ) )
# Find permutation
while factorials:
snake_case = factorials.pop()
snake_case , snake_case = divmod(UpperCamelCase_ ,UpperCamelCase_ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 127 | 0 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_a : Any = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : Union[str, Any] ) -> int:
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> str:
_lowerCAmelCase : str = _TestCommandArgs(dataset=A__ ,all_configs=A__ ,save_infos=A__ )
_lowerCAmelCase : str = TestCommand(*A__ )
test_command.run()
_lowerCAmelCase : Any = os.path.join(A__ ,"""README.md""" )
assert os.path.exists(A__ )
_lowerCAmelCase : Tuple = DatasetInfosDict.from_directory(A__ )
_lowerCAmelCase : Optional[int] = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) ,splits=[
{
"""name""": """train""",
"""num_bytes""": 2351563,
"""num_examples""": 10000,
},
{
"""name""": """validation""",
"""num_bytes""": 238418,
"""num_examples""": 1000,
},
] ,download_size=3940680 ,dataset_size=2589981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = getattr(dataset_infos["""default"""] ,A__ ), getattr(expected_dataset_infos["""default"""] ,A__ )
if key == "num_bytes":
assert is_apercent_close(A__ ,A__ )
elif key == "splits":
assert list(A__ ) == list(A__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
result == expected
| 357 | """simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : Any ,_lowerCamelCase : Optional[Any] ) -> str:
_lowerCAmelCase : str = AutoConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : int = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase )
_lowerCAmelCase : Any = checkpoints.load_tax_checkpoint(_lowerCamelCase )
_lowerCAmelCase : Tuple = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""]
if config.model_type == "t5":
_lowerCAmelCase : Tuple = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_lowerCAmelCase : Optional[Any] = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Union[str, Any] = """TransientGlobalSelfAttention"""
else:
raise ValueError(
"""Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"""
""" attribute with a value from ['local', 'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
_lowerCAmelCase : Tuple = f"layers_{str(_lowerCamelCase )}"
# Self-Attention
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_lowerCAmelCase : int = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_lowerCAmelCase : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_lowerCAmelCase : Any = flax_model.params["""encoder"""]["""block"""][str(_lowerCamelCase )]["""layer"""]
_lowerCAmelCase : Any = tax_attention_key
_lowerCAmelCase : str = tax_attention_out
_lowerCAmelCase : Union[str, Any] = tax_attention_query
_lowerCAmelCase : Optional[Any] = tax_attention_value
_lowerCAmelCase : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Any = tax_global_layer_norm
if split_mlp_wi:
_lowerCAmelCase : Dict = tax_mlp_wi_a
_lowerCAmelCase : List[Any] = tax_mlp_wi_a
else:
_lowerCAmelCase : List[str] = tax_mlp_wi
_lowerCAmelCase : str = tax_mlp_wo
_lowerCAmelCase : Optional[Any] = tax_mlp_layer_norm
_lowerCAmelCase : Any = flax_model_encoder_layer_block
# Only for layer 0:
_lowerCAmelCase : Union[str, Any] = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Optional[Any] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Optional[int] = tax_encoder_global_rel_embedding
# Assigning
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
_lowerCAmelCase : Any = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_lowerCAmelCase : Optional[int] = f"layers_{str(_lowerCamelCase )}"
# Self-Attention
_lowerCAmelCase : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
_lowerCAmelCase : int = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
_lowerCAmelCase : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Optional[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
_lowerCAmelCase : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
_lowerCAmelCase : List[str] = tax_enc_dec_attention_module["""key"""]["""kernel"""]
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_module["""out"""]["""kernel"""]
_lowerCAmelCase : List[str] = tax_enc_dec_attention_module["""query"""]["""kernel"""]
_lowerCAmelCase : Dict = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_lowerCAmelCase : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_lowerCAmelCase : str = flax_model.params["""decoder"""]["""block"""][str(_lowerCamelCase )]["""layer"""]
_lowerCAmelCase : int = tax_attention_key
_lowerCAmelCase : List[str] = tax_attention_out
_lowerCAmelCase : Optional[Any] = tax_attention_query
_lowerCAmelCase : Dict = tax_attention_value
_lowerCAmelCase : str = tax_pre_attention_layer_norm
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_key
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_out
_lowerCAmelCase : Tuple = tax_enc_dec_attention_query
_lowerCAmelCase : Any = tax_enc_dec_attention_value
_lowerCAmelCase : Dict = tax_cross_layer_norm
if split_mlp_wi:
_lowerCAmelCase : Dict = tax_mlp_wi_a
_lowerCAmelCase : int = tax_mlp_wi_a
else:
_lowerCAmelCase : Optional[int] = tax_mlp_wi
_lowerCAmelCase : Dict = tax_mlp_wo
_lowerCAmelCase : List[Any] = txa_mlp_layer_norm
_lowerCAmelCase : Optional[Any] = flax_model_decoder_layer_block
# Decoder Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
_lowerCAmelCase : List[str] = txa_decoder_norm
# Only for layer 0:
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Union[str, Any] = tax_decoder_rel_embedding
# Token Embeddings
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
_lowerCAmelCase : Optional[int] = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_lowerCAmelCase : Tuple = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(_lowerCamelCase )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
_a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
_a : List[str] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 126 | 0 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class __lowerCAmelCase ( UpperCAmelCase__ ):
def __init__( self : List[str] , snake_case__ : Union[str, "sqlalchemy.sql.Selectable"] , snake_case__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , snake_case__ : Optional[Features] = None , snake_case__ : str = None , snake_case__ : bool = False , **snake_case__ : Optional[Any] , ):
"""simple docstring"""
super().__init__(features=snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ , **snake_case__ )
_UpperCAmelCase = Sql(
cache_dir=snake_case__ , features=snake_case__ , sql=snake_case__ , con=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
self.builder.download_and_prepare(
download_config=snake_case__ , download_mode=snake_case__ , verification_mode=snake_case__ , base_path=snake_case__ , )
# Build dataset for splits
_UpperCAmelCase = self.builder.as_dataset(
split="train" , verification_mode=snake_case__ , in_memory=self.keep_in_memory )
return dataset
class __lowerCAmelCase :
def __init__( self : str , snake_case__ : Dataset , snake_case__ : str , snake_case__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , snake_case__ : Optional[int] = None , snake_case__ : Optional[int] = None , **snake_case__ : List[Any] , ):
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
_UpperCAmelCase = dataset
_UpperCAmelCase = name
_UpperCAmelCase = con
_UpperCAmelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_UpperCAmelCase = num_proc
_UpperCAmelCase = to_sql_kwargs
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
_UpperCAmelCase = self.to_sql_kwargs.pop("sql" , snake_case__ )
_UpperCAmelCase = self.to_sql_kwargs.pop("con" , snake_case__ )
_UpperCAmelCase = self.to_sql_kwargs.pop("index" , snake_case__ )
_UpperCAmelCase = self._write(index=snake_case__ , **self.to_sql_kwargs )
return written
def UpperCamelCase ( self : int , snake_case__ : List[Any] ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = args
_UpperCAmelCase = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
_UpperCAmelCase = query_table(
table=self.dataset.data , key=slice(snake_case__ , offset + self.batch_size ) , indices=self.dataset._indices , )
_UpperCAmelCase = batch.to_pandas()
_UpperCAmelCase = df.to_sql(self.name , self.con , index=snake_case__ , **snake_case__ )
return num_rows or len(snake_case__ )
def UpperCamelCase ( self : Any , snake_case__ : List[Any] , **snake_case__ : List[Any] ):
"""simple docstring"""
_UpperCAmelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
_UpperCAmelCase , _UpperCAmelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , snake_case__ , snake_case__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
| 133 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowercase_ : str = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
lowercase_ : Optional[Any] = {
'169M': 7_68,
'430M': 10_24,
'1B5': 20_48,
'3B': 25_60,
'7B': 40_96,
'14B': 51_20,
}
def __SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
_UpperCAmelCase = list(state_dict.keys() )
for name in state_dict_keys:
_UpperCAmelCase = state_dict.pop(snake_case_ )
# emb -> embedding
if name.startswith("emb." ):
_UpperCAmelCase = name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
_UpperCAmelCase = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
_UpperCAmelCase = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , snake_case_ )
# ffn -> feed_forward
_UpperCAmelCase = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , snake_case_ )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
_UpperCAmelCase = name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
_UpperCAmelCase = name.replace(".time_mix_v" , ".time_mix_value" )
# time_mix_r -> time_mix_key and reshape
if name.endswith(".time_mix_r" ):
_UpperCAmelCase = name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
_UpperCAmelCase = "rwkv." + name
_UpperCAmelCase = weight
return state_dict
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=None ):
'''simple docstring'''
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
_UpperCAmelCase = 5_0277
_UpperCAmelCase = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
_UpperCAmelCase = PreTrainedTokenizerFast(tokenizer_file=snake_case_ )
_UpperCAmelCase = len(snake_case_ )
tokenizer.save_pretrained(snake_case_ )
# 2. Build the config
_UpperCAmelCase = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_UpperCAmelCase = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
_UpperCAmelCase = RwkvConfig(
vocab_size=snake_case_ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(snake_case_ )
# 3. Download model file then convert state_dict
_UpperCAmelCase = hf_hub_download(snake_case_ , snake_case_ )
_UpperCAmelCase = torch.load(snake_case_ , map_location="cpu" )
_UpperCAmelCase = convert_state_dict(snake_case_ )
# 4. Split in shards and save
_UpperCAmelCase , _UpperCAmelCase = shard_checkpoint(snake_case_ )
for shard_file, shard in shards.items():
torch.save(snake_case_ , os.path.join(snake_case_ , snake_case_ ) )
if index is not None:
_UpperCAmelCase = os.path.join(snake_case_ , snake_case_ )
# Save the index as well
with open(snake_case_ , "w" , encoding="utf-8" ) as f:
_UpperCAmelCase = json.dumps(snake_case_ , indent=2 , sort_keys=snake_case_ ) + "\n"
f.write(snake_case_ )
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
"Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." )
_UpperCAmelCase = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_UpperCAmelCase = torch.load(os.path.join(snake_case_ , snake_case_ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case_ , snake_case_ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(snake_case_ )
model.push_to_hub(snake_case_ , max_shard_size="2GB" )
tokenizer.push_to_hub(snake_case_ )
if __name__ == "__main__":
lowercase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowercase_ : List[Any] = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 133 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
# Recurse if needed
if "." in tensor_name:
A : List[Any] = tensor_name.split("." )
for split in splits[:-1]:
A : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
A : int = new_module
A : Optional[int] = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
A : List[Any] = tensor_name in module._buffers
A : Any = getattr(_lowerCamelCase , _lowerCamelCase )
if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None:
raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
A : Dict = False
A : List[Any] = False
if is_buffer or not is_bitsandbytes_available():
A : Optional[int] = False
A : Dict = False
else:
A : Any = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
A : Any = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
A : Union[str, Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
A : List[str] = old_value.to(_lowerCamelCase )
elif isinstance(_lowerCamelCase , torch.Tensor ):
A : Any = value.to("cpu" )
if value.dtype == torch.inta:
A : Optional[int] = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse(
"0.37.2" )
if not is_abit_serializable:
raise ValueError(
"Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." )
else:
A : int = torch.tensor(_lowerCamelCase , device="cpu" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , _lowerCamelCase ) and fpaa_statistics is None:
A : Dict = new_value.T
A : int = old_value.__dict__
if is_abit:
A : Optional[Any] = bnb.nn.IntaParams(_lowerCamelCase , requires_grad=_lowerCamelCase , **_lowerCamelCase ).to(_lowerCamelCase )
elif is_abit:
A : Dict = bnb.nn.Paramsabit(_lowerCamelCase , requires_grad=_lowerCamelCase , **_lowerCamelCase ).to(_lowerCamelCase )
A : str = new_value
if fpaa_statistics is not None:
setattr(module.weight , "SCB" , fpaa_statistics.to(_lowerCamelCase ) )
else:
if value is None:
A : int = old_value.to(_lowerCamelCase )
elif isinstance(_lowerCamelCase , torch.Tensor ):
A : Optional[int] = value.to(_lowerCamelCase )
else:
A : Tuple = torch.tensor(_lowerCamelCase , device=_lowerCamelCase )
if is_buffer:
A : str = new_value
else:
A : List[Any] = nn.Parameter(_lowerCamelCase , requires_grad=old_value.requires_grad )
A : List[str] = new_value
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=False ):
for name, module in model.named_children():
if current_key_name is None:
A : Dict = []
current_key_name.append(_lowerCamelCase )
if (isinstance(_lowerCamelCase , nn.Linear ) or isinstance(_lowerCamelCase , _lowerCamelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in ".".join(_lowerCamelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A , A : List[str] = module.weight.shape
else:
A : List[Any] = module.in_features
A : Any = module.out_features
if quantization_config.quantization_method() == "llm_int8":
A : List[str] = bnb.nn.LinearabitLt(
_lowerCamelCase , _lowerCamelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
A : List[str] = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
A : str = bnb.nn.Linearabit(
_lowerCamelCase , _lowerCamelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
A : Optional[int] = True
# Store the module class in case we need to transpose the weight later
A : Tuple = type(_lowerCamelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowerCamelCase )
if len(list(module.children() ) ) > 0:
A , A : Any = _replace_with_bnb_linear(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , has_been_replaced=_lowerCamelCase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None ):
A : str = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
A , A : Union[str, Any] = _replace_with_bnb_linear(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def UpperCAmelCase ( *_lowerCamelCase , **_lowerCamelCase ):
warnings.warn(
"`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , _lowerCamelCase , )
return replace_with_bnb_linear(*_lowerCamelCase , **_lowerCamelCase )
def UpperCAmelCase ( *_lowerCamelCase , **_lowerCamelCase ):
warnings.warn(
"`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , _lowerCamelCase , )
return set_module_quantized_tensor_to_device(*_lowerCamelCase , **_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase ):
A : Union[str, Any] = deepcopy(_lowerCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
A : str = find_tied_parameters(_lowerCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A : List[str] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A : str = sum(_lowerCamelCase , [] )
A : List[str] = len(_lowerCamelCase ) > 0
# Check if it is a base model
A : Optional[int] = not hasattr(_lowerCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A : Optional[Any] = list(model.named_children() )
A : Optional[int] = [list_modules[-1][0]]
# add last module together with tied weights
A : List[str] = set(_lowerCamelCase ) - set(_lowerCamelCase )
A : int = list(set(_lowerCamelCase ) ) + list(_lowerCamelCase )
# remove ".weight" from the keys
A : Any = [".weight", ".bias"]
A : Optional[Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A : int = name.replace(_lowerCamelCase , "" )
filtered_module_names.append(_lowerCamelCase )
return filtered_module_names | 256 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
__SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
__SCREAMING_SNAKE_CASE = {
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
__SCREAMING_SNAKE_CASE = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_INIT_CONFIGURATION
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ElectraTokenizer
def __init__( self : int , __lowerCamelCase : str=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Tuple=True , __lowerCamelCase : int="[UNK]" , __lowerCamelCase : Any="[SEP]" , __lowerCamelCase : Union[str, Any]="[PAD]" , __lowerCamelCase : str="[CLS]" , __lowerCamelCase : Tuple="[MASK]" , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : str=None , **__lowerCamelCase : str , ) -> List[str]:
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , )
A : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , __lowerCamelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , __lowerCamelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __lowerCamelCase ) != tokenize_chinese_chars
):
A : Union[str, Any] = getattr(__lowerCamelCase , normalizer_state.pop("type" ) )
A : List[Any] = do_lower_case
A : Tuple = strip_accents
A : Any = tokenize_chinese_chars
A : Tuple = normalizer_class(**__lowerCamelCase )
A : Optional[Any] = do_lower_case
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict=None ) -> List[Any]:
A : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
A : int = [self.sep_token_id]
A : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
A : List[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase ) | 256 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class snake_case :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCamelCase_ = []
lowerCamelCase_ = 0
lowerCamelCase_ = 0
def snake_case ( self ):
"""simple docstring"""
return self.head == self.tail
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
self.data.append(UpperCamelCase )
lowerCamelCase_ = self.tail + 1
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.data[self.head]
lowerCamelCase_ = self.head + 1
return ret
def snake_case ( self ):
"""simple docstring"""
return self.tail - self.head
def snake_case ( self ):
"""simple docstring"""
print(self.data )
print("**************" )
print(self.data[self.head : self.tail] )
class snake_case :
"""simple docstring"""
def __init__( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = data
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = 1
def snake_case ( self ):
"""simple docstring"""
return self.data
def snake_case ( self ):
"""simple docstring"""
return self.left
def snake_case ( self ):
"""simple docstring"""
return self.right
def snake_case ( self ):
"""simple docstring"""
return self.height
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = data
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = node
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = node
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = height
def __snake_case ( UpperCAmelCase_ : MyNode | None ):
if node is None:
return 0
return node.get_height()
def __snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
if a > b:
return a
return b
def __snake_case ( UpperCAmelCase_ : MyNode ):
print("left rotation node:" , node.get_data() )
lowerCamelCase_ = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(UpperCAmelCase_ )
lowerCamelCase_ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(UpperCAmelCase_ )
lowerCamelCase_ = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(UpperCAmelCase_ )
return ret
def __snake_case ( UpperCAmelCase_ : MyNode ):
print("right rotation node:" , node.get_data() )
lowerCamelCase_ = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(UpperCAmelCase_ )
lowerCamelCase_ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(UpperCAmelCase_ )
lowerCamelCase_ = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(UpperCAmelCase_ )
return ret
def __snake_case ( UpperCAmelCase_ : MyNode ):
lowerCamelCase_ = node.get_left()
assert left_child is not None
node.set_left(left_rotation(UpperCAmelCase_ ) )
return right_rotation(UpperCAmelCase_ )
def __snake_case ( UpperCAmelCase_ : MyNode ):
lowerCamelCase_ = node.get_right()
assert right_child is not None
node.set_right(right_rotation(UpperCAmelCase_ ) )
return left_rotation(UpperCAmelCase_ )
def __snake_case ( UpperCAmelCase_ : MyNode | None , UpperCAmelCase_ : Any ):
if node is None:
return MyNode(UpperCAmelCase_ )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , UpperCAmelCase_ ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
lowerCamelCase_ = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
lowerCamelCase_ = right_rotation(UpperCAmelCase_ )
else:
lowerCamelCase_ = lr_rotation(UpperCAmelCase_ )
else:
node.set_right(insert_node(node.get_right() , UpperCAmelCase_ ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
lowerCamelCase_ = node.get_right()
assert right_child is not None
if data < right_child.get_data():
lowerCamelCase_ = rl_rotation(UpperCAmelCase_ )
else:
lowerCamelCase_ = left_rotation(UpperCAmelCase_ )
lowerCamelCase_ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(UpperCAmelCase_ )
return node
def __snake_case ( UpperCAmelCase_ : MyNode ):
while True:
lowerCamelCase_ = root.get_right()
if right_child is None:
break
lowerCamelCase_ = right_child
return root.get_data()
def __snake_case ( UpperCAmelCase_ : MyNode ):
while True:
lowerCamelCase_ = root.get_left()
if left_child is None:
break
lowerCamelCase_ = left_child
return root.get_data()
def __snake_case ( UpperCAmelCase_ : MyNode , UpperCAmelCase_ : Any ):
lowerCamelCase_ = root.get_left()
lowerCamelCase_ = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
lowerCamelCase_ = get_left_most(UpperCAmelCase_ )
root.set_data(UpperCAmelCase_ )
root.set_right(del_node(UpperCAmelCase_ , UpperCAmelCase_ ) )
elif left_child is not None:
lowerCamelCase_ = left_child
elif right_child is not None:
lowerCamelCase_ = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("No such data" )
return root
else:
root.set_left(del_node(UpperCAmelCase_ , UpperCAmelCase_ ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(UpperCAmelCase_ , UpperCAmelCase_ ) )
if get_height(UpperCAmelCase_ ) - get_height(UpperCAmelCase_ ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
lowerCamelCase_ = left_rotation(UpperCAmelCase_ )
else:
lowerCamelCase_ = rl_rotation(UpperCAmelCase_ )
elif get_height(UpperCAmelCase_ ) - get_height(UpperCAmelCase_ ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
lowerCamelCase_ = right_rotation(UpperCAmelCase_ )
else:
lowerCamelCase_ = lr_rotation(UpperCAmelCase_ )
lowerCamelCase_ = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(UpperCAmelCase_ )
return root
class snake_case :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCamelCase_ = None
def snake_case ( self ):
"""simple docstring"""
return get_height(self.root )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
print("insert:" + str(UpperCamelCase ) )
lowerCamelCase_ = insert_node(self.root , UpperCamelCase )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
print("delete:" + str(UpperCamelCase ) )
if self.root is None:
print("Tree is empty!" )
return
lowerCamelCase_ = del_node(self.root , UpperCamelCase )
def __str__( self , ): # a level traversale, gives a more intuitive look on the tree
"""simple docstring"""
lowerCamelCase_ = ""
lowerCamelCase_ = MyQueue()
q.push(self.root )
lowerCamelCase_ = self.get_height()
if layer == 0:
return output
lowerCamelCase_ = 0
while not q.is_empty():
lowerCamelCase_ = q.pop()
lowerCamelCase_ = " " * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(UpperCamelCase )
q.push(UpperCamelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
lowerCamelCase_ = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , UpperCamelCase ) - 1:
lowerCamelCase_ = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __snake_case ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
a_ : Tuple = AVLtree()
a_ : Tuple = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 55 |
"""simple docstring"""
def UpperCamelCase ( _lowerCAmelCase : int = 3, _lowerCAmelCase : int = 7, _lowerCAmelCase : int = 1000000 ) -> int:
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : int = 1
for current_denominator in range(1, limit + 1 ):
_UpperCAmelCase : Union[str, Any] = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
_UpperCAmelCase : Optional[Any] = current_numerator
_UpperCAmelCase : str = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
| 246 | 0 |
def lowerCamelCase__ ( ) -> Tuple:
__snake_case = 0
for i in range(1 , 1001 ):
total += i**i
return str(snake_case_ )[-10:]
if __name__ == "__main__":
print(solution())
| 238 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
snake_case_ = data_utils.TransfoXLTokenizer
snake_case_ = data_utils.TransfoXLCorpus
snake_case_ = data_utils
snake_case_ = data_utils
def lowerCamelCase__ ( snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : int ) -> Dict:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case_ , '''rb''' ) as fp:
__snake_case = pickle.load(snake_case_ , encoding='''latin1''' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
__snake_case = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" )
__snake_case = corpus.vocab.__dict__
torch.save(snake_case_ , snake_case_ )
__snake_case = corpus.__dict__
corpus_dict_no_vocab.pop('''vocab''' , snake_case_ )
__snake_case = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
print(f"""Save dataset to {pytorch_dataset_dump_path}""" )
torch.save(snake_case_ , snake_case_ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
__snake_case = os.path.abspath(snake_case_ )
__snake_case = os.path.abspath(snake_case_ )
print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
# Initialise PyTorch model
if transfo_xl_config_file == "":
__snake_case = TransfoXLConfig()
else:
__snake_case = TransfoXLConfig.from_json_file(snake_case_ )
print(f"""Building PyTorch model from configuration: {config}""" )
__snake_case = TransfoXLLMHeadModel(snake_case_ )
__snake_case = load_tf_weights_in_transfo_xl(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
__snake_case = os.path.join(snake_case_ , snake_case_ )
__snake_case = os.path.join(snake_case_ , snake_case_ )
print(f"""Save PyTorch model to {os.path.abspath(snake_case_ )}""" )
torch.save(model.state_dict() , snake_case_ )
print(f"""Save configuration file to {os.path.abspath(snake_case_ )}""" )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
snake_case_ = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 238 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """levit"""
def __init__( self , __lowerCAmelCase=2_2_4 , __lowerCAmelCase=3 , __lowerCAmelCase=3 , __lowerCAmelCase=2 , __lowerCAmelCase=1 , __lowerCAmelCase=1_6 , __lowerCAmelCase=[1_2_8, 2_5_6, 3_8_4] , __lowerCAmelCase=[4, 8, 1_2] , __lowerCAmelCase=[4, 4, 4] , __lowerCAmelCase=[1_6, 1_6, 1_6] , __lowerCAmelCase=0 , __lowerCAmelCase=[2, 2, 2] , __lowerCAmelCase=[2, 2, 2] , __lowerCAmelCase=0.02 , **__lowerCAmelCase , ):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
lowerCamelCase__ = image_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = kernel_size
lowerCamelCase__ = stride
lowerCamelCase__ = padding
lowerCamelCase__ = hidden_sizes
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = depths
lowerCamelCase__ = key_dim
lowerCamelCase__ = drop_path_rate
lowerCamelCase__ = patch_size
lowerCamelCase__ = attention_ratio
lowerCamelCase__ = mlp_ratio
lowerCamelCase__ = initializer_range
lowerCamelCase__ = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = version.parse("""1.11""" )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 1E-4
| 209 |
def lowerCAmelCase__(__snake_case ) -> str:
'''simple docstring'''
return "".join(chr(ord(__snake_case ) - 32 ) if '''a''' <= char <= '''z''' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 209 | 1 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase_ =(KDPMaDiscreteScheduler,)
UpperCAmelCase_ =10
def _UpperCamelCase ( self , **_A ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**UpperCAmelCase__ )
return config
def _UpperCamelCase ( self ) -> List[str]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ )
def _UpperCamelCase ( self ) -> List[str]:
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__ )
def _UpperCamelCase ( self ) -> Optional[int]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCAmelCase__ )
def _UpperCamelCase ( self ) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase__ )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE_ = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE_ = sample.to(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE_ = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ = model(UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ = output.prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(UpperCAmelCase__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1_112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_428_650_170_972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
def _UpperCamelCase ( self ) -> List[str]:
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE_ = sample.to(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE_ = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ = model(UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ = output.prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(UpperCAmelCase__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
def _UpperCamelCase ( self ) -> List[Any]:
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter.to(UpperCAmelCase__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ = model(UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ = output.prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(UpperCAmelCase__ ) )
if str(UpperCAmelCase__ ).startswith('''cpu''' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
| 361 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ ="unispeech-sat"
def __init__( self , _A=32 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=0.1 , _A=0.0 , _A=0.0 , _A=0.1 , _A=0.1 , _A=0.02 , _A=1E-5 , _A="group" , _A="gelu" , _A=(512, 512, 512, 512, 512, 512, 512) , _A=(5, 2, 2, 2, 2, 2, 2) , _A=(10, 3, 3, 3, 3, 2, 2) , _A=False , _A=128 , _A=16 , _A=False , _A=True , _A=0.05 , _A=10 , _A=2 , _A=0.0 , _A=10 , _A=0 , _A=320 , _A=2 , _A=0.1 , _A=100 , _A=256 , _A=256 , _A=0.1 , _A="mean" , _A=False , _A=False , _A=256 , _A=(512, 512, 512, 512, 1500) , _A=(5, 3, 3, 1, 1) , _A=(1, 2, 3, 1, 1) , _A=512 , _A=0 , _A=1 , _A=2 , _A=504 , **_A , ) -> Tuple:
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = feat_extract_norm
SCREAMING_SNAKE_CASE_ = feat_extract_activation
SCREAMING_SNAKE_CASE_ = list(_A )
SCREAMING_SNAKE_CASE_ = list(_A )
SCREAMING_SNAKE_CASE_ = list(_A )
SCREAMING_SNAKE_CASE_ = conv_bias
SCREAMING_SNAKE_CASE_ = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE_ = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE_ = len(self.conv_dim )
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = feat_proj_dropout
SCREAMING_SNAKE_CASE_ = final_dropout
SCREAMING_SNAKE_CASE_ = layerdrop
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = num_clusters
SCREAMING_SNAKE_CASE_ = do_stable_layer_norm
SCREAMING_SNAKE_CASE_ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE_ = apply_spec_augment
SCREAMING_SNAKE_CASE_ = mask_time_prob
SCREAMING_SNAKE_CASE_ = mask_time_length
SCREAMING_SNAKE_CASE_ = mask_time_min_masks
SCREAMING_SNAKE_CASE_ = mask_feature_prob
SCREAMING_SNAKE_CASE_ = mask_feature_length
SCREAMING_SNAKE_CASE_ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE_ = num_codevectors_per_group
SCREAMING_SNAKE_CASE_ = num_codevector_groups
SCREAMING_SNAKE_CASE_ = contrastive_logits_temperature
SCREAMING_SNAKE_CASE_ = feat_quantizer_dropout
SCREAMING_SNAKE_CASE_ = num_negatives
SCREAMING_SNAKE_CASE_ = codevector_dim
SCREAMING_SNAKE_CASE_ = proj_codevector_dim
SCREAMING_SNAKE_CASE_ = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE_ = ctc_loss_reduction
SCREAMING_SNAKE_CASE_ = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE_ = list(_A )
SCREAMING_SNAKE_CASE_ = list(_A )
SCREAMING_SNAKE_CASE_ = list(_A )
SCREAMING_SNAKE_CASE_ = xvector_output_dim
@property
def _UpperCamelCase ( self ) -> str:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 257 | 0 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class A_ ( _lowerCamelCase , unittest.TestCase ):
_UpperCAmelCase : Dict = RoFormerTokenizer
_UpperCAmelCase : Optional[int] = RoFormerTokenizerFast
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : Union[str, Any] = True
def lowerCAmelCase ( self : Tuple):
super().setUp()
def lowerCAmelCase ( self : List[str] ,**SCREAMING_SNAKE_CASE__ : str):
return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' ,**lowercase_)
def lowerCAmelCase ( self : int ,**SCREAMING_SNAKE_CASE__ : int):
return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' ,**lowercase_)
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : int = '永和服装饰品有限公司,今天天气非常好'
__lowerCamelCase : List[str] = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
return input_text, output_text
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : Any = self.get_tokenizer()
__lowerCamelCase , __lowerCamelCase : Tuple = self.get_chinese_input_output_texts()
__lowerCamelCase : Optional[int] = tokenizer.tokenize(lowercase_)
self.assertListEqual(lowercase_ ,output_text.split())
__lowerCamelCase : List[str] = tokens + [tokenizer.unk_token]
__lowerCamelCase : List[Any] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) ,lowercase_)
def lowerCAmelCase ( self : int):
__lowerCamelCase : int = self.get_rust_tokenizer()
__lowerCamelCase , __lowerCamelCase : int = self.get_chinese_input_output_texts()
__lowerCamelCase : List[str] = tokenizer.tokenize(lowercase_)
self.assertListEqual(lowercase_ ,output_text.split())
__lowerCamelCase : Optional[Any] = tokens + [tokenizer.unk_token]
__lowerCamelCase : List[str] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) ,lowercase_)
def lowerCAmelCase ( self : int):
pass
def lowerCAmelCase ( self : List[Any]):
pass
def lowerCAmelCase ( self : Optional[int]):
pass
| 73 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
a : Dict = None
a : List[Any] = logging.get_logger(__name__)
a : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a : str = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
a : List[Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class a ( _lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = TaTokenizer
snake_case_ = []
def __init__( self : List[Any] , lowercase_ : int=None , lowercase_ : Dict=None , lowercase_ : Dict="</s>" , lowercase_ : List[Any]="<unk>" , lowercase_ : int="<pad>" , lowercase_ : int=100 , lowercase_ : List[Any]=None , **lowercase_ : List[str] , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
snake_case_ = [F"<extra_id_{i}>" for i in range(lowercase_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
snake_case_ = len(set(filter(lambda lowercase_ : bool('''extra_id_''' in str(lowercase_ ) ) , lowercase_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
snake_case_ = extra_ids
@staticmethod
def A_ ( lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : int ):
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
snake_case_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F" {pretrained_model_name_or_path} automatically truncating your input to"
F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , lowercase_ , )
return max_model_length
def A_ ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowercase_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case_ = os.path.join(
lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
logger.info(F"Copy vocab file to {out_vocab_file}" )
return (out_vocab_file,)
def A_ ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
snake_case_ = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
snake_case_ = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def A_ ( self : int , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
snake_case_ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def A_ ( self : Dict ):
return list(
set(filter(lambda lowercase_ : bool(re.search(R'''<extra_id_\d+>''' , lowercase_ ) ) is not None , self.additional_special_tokens ) ) )
def A_ ( self : Any ):
return [self.convert_tokens_to_ids(lowercase_ ) for token in self.get_sentinel_tokens()]
| 56 | 0 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
a_ : Dict = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _snake_case ( A__ ):
def __init__( self , *a , a=None , a=None , a=None , **a) -> List[Any]:
super().__init__(*a , **a)
SCREAMING_SNAKE_CASE = eval_examples
SCREAMING_SNAKE_CASE = post_process_function
SCREAMING_SNAKE_CASE = quant_trainer_args
SCREAMING_SNAKE_CASE = 128 # default number of calibration samples
def SCREAMING_SNAKE_CASE__ ( self , a=None) -> Union[str, Any]:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('Trainer: calibration requires an calib_dataset.')
SCREAMING_SNAKE_CASE = calib_dataset if calib_dataset is not None else self.calib_dataset
SCREAMING_SNAKE_CASE = self._remove_unused_columns(a , description='Calibration')
return DataLoader(
a , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=a , )
def SCREAMING_SNAKE_CASE__ ( self , a=None) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.train_dataset if calib_dataset is None else calib_dataset
SCREAMING_SNAKE_CASE = self.get_calib_dataloader(a)
SCREAMING_SNAKE_CASE = self.model
quant_trainer.configure_model(a , self.quant_trainer_args , calib=a)
model.eval()
quant_trainer.enable_calibration(a)
logger.info('***** Running calibration *****')
logger.info(f''' Num examples = {self.calib_num}''')
logger.info(f''' Batch size = {calib_dataloader.batch_size}''')
for step, inputs in enumerate(a):
# Prediction step
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.prediction_step(a , a , prediction_loss_only=a)
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(a , self.quant_trainer_args)
SCREAMING_SNAKE_CASE = model
def SCREAMING_SNAKE_CASE__ ( self , a=None , a=None , a=None , a = "eval") -> str:
SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE = self.get_eval_dataloader(a)
SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE = self.compute_metrics
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE = eval_loop(
a , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a , )
finally:
SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
SCREAMING_SNAKE_CASE = self.post_process_function(a , a , output.predictions)
SCREAMING_SNAKE_CASE = self.compute_metrics(a)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'''{metric_key_prefix}_'''):
SCREAMING_SNAKE_CASE = metrics.pop(a)
self.log(a)
else:
SCREAMING_SNAKE_CASE = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , a)
return metrics
def SCREAMING_SNAKE_CASE__ ( self , a , a , a=None , a = "test") -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.get_test_dataloader(a)
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE = self.compute_metrics
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE = eval_loop(
a , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a , )
finally:
SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE = self.post_process_function(a , a , output.predictions , 'predict')
SCREAMING_SNAKE_CASE = self.compute_metrics(a)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'''{metric_key_prefix}_'''):
SCREAMING_SNAKE_CASE = metrics.pop(a)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=a)
def SCREAMING_SNAKE_CASE__ ( self , a="./") -> List[Any]:
SCREAMING_SNAKE_CASE = self.eval_dataset
SCREAMING_SNAKE_CASE = self.get_eval_dataloader(a)
SCREAMING_SNAKE_CASE = next(iter(a))
# saving device - to make it consistent
SCREAMING_SNAKE_CASE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# convert to tuple
SCREAMING_SNAKE_CASE = tuple(v.to(a) for k, v in batch.items())
logger.info('Converting model to be onnx compatible')
from pytorch_quantization.nn import TensorQuantizer
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = self.model.to(a)
model.eval()
model.float()
SCREAMING_SNAKE_CASE = model.module if hasattr(a , 'module') else model
quant_trainer.configure_model(a , self.quant_trainer_args)
SCREAMING_SNAKE_CASE = os.path.join(a , 'model.onnx')
logger.info(f'''exporting model to {output_model_file}''')
SCREAMING_SNAKE_CASE = {0: 'batch_size', 1: 'seq_len'}
torch.onnx.export(
a , a , a , export_params=a , opset_version=13 , do_constant_folding=a , input_names=['input_ids', 'attention_mask', 'token_type_ids'] , output_names=['output_start_logits', 'output_end_logits'] , dynamic_axes={
'input_ids': axes,
'attention_mask': axes,
'token_type_ids': axes,
'output_start_logits': axes,
'output_end_logits': axes,
} , verbose=a , )
logger.info('onnx export finished')
| 327 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = 'laion/clap-htsat-unfused'
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE__ ( self , **a) -> Optional[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **a)
def SCREAMING_SNAKE_CASE__ ( self , **a) -> Union[str, Any]:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_feature_extractor()
SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=a , feature_extractor=a)
processor.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE = ClapProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer , a)
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor , a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor())
processor.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
SCREAMING_SNAKE_CASE = self.get_feature_extractor(do_normalize=a , padding_value=1.0)
SCREAMING_SNAKE_CASE = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=a , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , a)
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor , a)
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = self.get_feature_extractor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=a , feature_extractor=a)
SCREAMING_SNAKE_CASE = floats_list((3, 1000))
SCREAMING_SNAKE_CASE = feature_extractor(a , return_tensors='np')
SCREAMING_SNAKE_CASE = processor(audios=a , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = self.get_feature_extractor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=a , feature_extractor=a)
SCREAMING_SNAKE_CASE = 'This is a test string'
SCREAMING_SNAKE_CASE = processor(text=a)
SCREAMING_SNAKE_CASE = tokenizer(a)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.get_feature_extractor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=a , feature_extractor=a)
SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE = processor.batch_decode(a)
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(a)
self.assertListEqual(a , a)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.get_feature_extractor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=a , feature_extractor=a)
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 327 | 1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_lowerCamelCase : int = IFInpaintingSuperResolutionPipeline
_lowerCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
_lowerCamelCase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
_lowerCamelCase : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def lowercase ( self : Union[str, Any] ):
return self._get_superresolution_dummy_components()
def lowercase ( self : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : str=0 ):
if str(snake_case_ ).startswith("mps" ):
_UpperCAmelCase = torch.manual_seed(snake_case_ )
else:
_UpperCAmelCase = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_UpperCAmelCase = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_UpperCAmelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_UpperCAmelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_UpperCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase ( self : List[str] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowercase ( self : List[Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def lowercase ( self : Any ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowercase ( self : Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowercase ( self : Tuple ):
self._test_save_load_local()
def lowercase ( self : Tuple ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 22 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 22 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : Optional[int] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
a : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 365 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Dict = logging.get_logger(__name__)
a : List[Any] = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class __UpperCamelCase ( a__ ):
lowerCamelCase : Tuple ="""open-llama"""
def __init__( self , lowerCAmelCase__=10_0000 , lowerCAmelCase__=4096 , lowerCAmelCase__=1_1008 , lowerCAmelCase__=32 , lowerCAmelCase__=32 , lowerCAmelCase__="silu" , lowerCAmelCase__=2048 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-6 , lowerCAmelCase__=True , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=2 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> Tuple:
a : Any = vocab_size
a : List[str] = max_position_embeddings
a : int = hidden_size
a : str = intermediate_size
a : List[str] = num_hidden_layers
a : int = num_attention_heads
a : Dict = hidden_act
a : Union[str, Any] = initializer_range
a : Tuple = rms_norm_eps
a : Union[str, Any] = use_cache
a : Union[str, Any] = kwargs.pop(
"use_memorry_efficient_attention" , lowerCAmelCase__ )
a : int = hidden_dropout_prob
a : Tuple = attention_dropout_prob
a : Optional[Any] = use_stable_embedding
a : str = shared_input_output_embedding
a : str = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , tie_word_embeddings=lowerCAmelCase__ , **lowerCAmelCase__ , )
def __a ( self ) -> Union[str, Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowerCAmelCase__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f"""got {self.rope_scaling}""" )
a : Any = self.rope_scaling.get("type" , lowerCAmelCase__ )
a : List[str] = self.rope_scaling.get("factor" , lowerCAmelCase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 79 | 0 |
"""simple docstring"""
def lowerCamelCase ( _UpperCamelCase : int = 1_0_0_0 ) -> int:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = -1
__UpperCAmelCase : Optional[Any] = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
__UpperCAmelCase : Optional[int] = (n * n - 2 * a * n) // (2 * n - 2 * a)
__UpperCAmelCase : Union[str, Any] = n - a - b
if c * c == (a * a + b * b):
__UpperCAmelCase : Union[str, Any] = a * b * c
if candidate >= product:
__UpperCAmelCase : Any = candidate
return product
if __name__ == "__main__":
print(F"{solution() = }")
| 115 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( A , unittest.TestCase ):
"""simple docstring"""
__a = None
__a = BloomTokenizerFast
__a = BloomTokenizerFast
__a = True
__a = False
__a = """tokenizer_file"""
__a = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
super().setUp()
__UpperCAmelCase : Any = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : Any , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = self.get_rust_tokenizer()
__UpperCAmelCase : Optional[Any] = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
__UpperCAmelCase : int = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
__UpperCAmelCase : Dict = tokenizer.batch_encode_plus(UpperCamelCase )["""input_ids"""]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : int = tokenizer.batch_decode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def lowerCamelCase__ ( self : int , UpperCamelCase : Any=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__UpperCAmelCase : Dict = """This is a simple input"""
__UpperCAmelCase : str = ["""This is a simple input 1""", """This is a simple input 2"""]
__UpperCAmelCase : List[str] = ("""This is a simple input""", """This is a pair""")
__UpperCAmelCase : Dict = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCamelCase , max_length=UpperCamelCase )
tokenizer_r.encode_plus(UpperCamelCase , max_length=UpperCamelCase )
tokenizer_r.batch_encode_plus(UpperCamelCase , max_length=UpperCamelCase )
tokenizer_r.encode(UpperCamelCase , max_length=UpperCamelCase )
tokenizer_r.batch_encode_plus(UpperCamelCase , max_length=UpperCamelCase )
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""" )
__UpperCAmelCase : Union[str, Any] = None # Hotfixing padding = None
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" )
# Simple input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" , )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer()
__UpperCAmelCase : Optional[Any] = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=UpperCamelCase )
__UpperCAmelCase : Optional[Any] = next(iter(UpperCamelCase ) )["""premise"""] # pick up one data
__UpperCAmelCase : Any = list(sample_data.values() )
__UpperCAmelCase : Optional[Any] = list(map(tokenizer.encode , UpperCamelCase ) )
__UpperCAmelCase : List[Any] = [tokenizer.decode(UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase ) for x in output_tokens]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 115 | 1 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 352 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowercase = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''LayoutLMv3TokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''LayoutLMv3FeatureExtractor''']
__lowercase = ['''LayoutLMv3ImageProcessor''']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 105 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A = logging.get_logger(__name__)
class UpperCAmelCase (lowerCamelCase_ ):
"""simple docstring"""
_UpperCAmelCase :List[str] = ["""pixel_values"""]
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = 32 , _UpperCAmelCase=PILImageResampling.BILINEAR , _UpperCAmelCase = True , **_UpperCAmelCase , ):
lowercase__: Dict = do_resize
lowercase__: List[Any] = do_rescale
lowercase__: Dict = size_divisor
lowercase__: List[str] = resample
super().__init__(**_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase ):
lowercase__, lowercase__: Optional[Any] = get_image_size(_UpperCAmelCase )
# Rounds the height and width down to the closest multiple of size_divisor
lowercase__: Any = height // size_divisor * size_divisor
lowercase__: Tuple = width // size_divisor * size_divisor
lowercase__: str = resize(_UpperCAmelCase , (new_h, new_w) , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
return image
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase ):
return rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
lowercase__: Tuple = do_resize if do_resize is not None else self.do_resize
lowercase__: Tuple = do_rescale if do_rescale is not None else self.do_rescale
lowercase__: Union[str, Any] = size_divisor if size_divisor is not None else self.size_divisor
lowercase__: Dict = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
lowercase__: Tuple = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
lowercase__: Tuple = [to_numpy_array(_UpperCAmelCase ) for img in images]
if do_resize:
lowercase__: List[str] = [self.resize(_UpperCAmelCase , size_divisor=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
lowercase__: str = [self.rescale(_UpperCAmelCase , scale=1 / 255 ) for image in images]
lowercase__: Tuple = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
lowercase__: List[Any] = {'''pixel_values''': images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
| 177 |
'''simple docstring'''
import math
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
UpperCAmelCase__ = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE__ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str]=1 , **SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ = factor * value
UpperCAmelCase__ = value
while not is_prime(SCREAMING_SNAKE_CASE__ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **SCREAMING_SNAKE_CASE__ )
return value
| 346 | 0 |
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1024 , lowerCAmelCase_=1024 , lowerCAmelCase_=False , **lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path="train" , **lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = tok.pad_token_id
def get_lens(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = tqdm(
DataLoader(lowerCAmelCase_ , batch_size=512 , num_workers=8 , shuffle=lowerCAmelCase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
__SCREAMING_SNAKE_CASE = []
for batch in dl:
__SCREAMING_SNAKE_CASE = batch['''input_ids'''].ne(lowerCAmelCase_ ).sum(1 ).tolist()
__SCREAMING_SNAKE_CASE = batch['''labels'''].ne(lowerCAmelCase_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
max_lens.append(max(lowerCAmelCase_ , lowerCAmelCase_ ) )
else:
max_lens.extend(lowerCAmelCase_ )
return max_lens
__SCREAMING_SNAKE_CASE = get_lens(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path="val" , **lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = get_lens(lowerCAmelCase_ )
pickle_save(lowerCAmelCase_ , train_ds.len_file )
pickle_save(lowerCAmelCase_ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 371 |
"""simple docstring"""
from datetime import datetime
import requests
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
__SCREAMING_SNAKE_CASE = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
return requests.get(lowerCAmelCase_ ).content
if __name__ == "__main__":
a__ : str = input('''Enter Video/IGTV url: ''').strip()
a__ : List[Any] = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F"Done. Video saved to disk as {file_name}.")
| 195 | 0 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = None
def __lowerCamelCase ( A__ , A__=0.999 , A__="cosine" , ) -> Tuple:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(A__ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
UpperCamelCase = []
for i in range(A__ ):
UpperCamelCase = i / num_diffusion_timesteps
UpperCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) )
return torch.tensor(A__ , dtype=torch.floataa )
class SCREAMING_SNAKE_CASE ( _a , _a ):
"""simple docstring"""
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 1_0_0_0 , UpperCamelCase__ : str = "fixed_small_log" , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[float] = 1.0 , UpperCamelCase__ : str = "epsilon" , UpperCamelCase__ : str = "squaredcos_cap_v2" , ):
"""simple docstring"""
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' )
UpperCamelCase = betas_for_alpha_bar(UpperCamelCase__ )
UpperCamelCase = 1.0 - self.betas
UpperCamelCase = torch.cumprod(self.alphas , dim=0 )
UpperCamelCase = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
UpperCamelCase = 1.0
# setable values
UpperCamelCase = None
UpperCamelCase = torch.from_numpy(np.arange(0 , UpperCamelCase__ )[::-1].copy() )
UpperCamelCase = variance_type
def A ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None ):
"""simple docstring"""
return sample
def A ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
"""simple docstring"""
UpperCamelCase = num_inference_steps
UpperCamelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
UpperCamelCase = (np.arange(0 , UpperCamelCase__ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
UpperCamelCase = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ )
def A ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Tuple=None ):
"""simple docstring"""
if prev_timestep is None:
UpperCamelCase = t - 1
UpperCamelCase = self.alphas_cumprod[t]
UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCamelCase = 1 - alpha_prod_t
UpperCamelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCamelCase = self.betas[t]
else:
UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCamelCase = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
UpperCamelCase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
UpperCamelCase = torch.log(torch.clamp(UpperCamelCase__ , min=1E-2_0 ) )
UpperCamelCase = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
UpperCamelCase = variance.log()
UpperCamelCase = beta.log()
UpperCamelCase = (predicted_variance + 1) / 2
UpperCamelCase = frac * max_log + (1 - frac) * min_log
return variance
def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str=None , UpperCamelCase__ : bool = True , ):
"""simple docstring"""
UpperCamelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
UpperCamelCase , UpperCamelCase = torch.split(UpperCamelCase__ , sample.shape[1] , dim=1 )
else:
UpperCamelCase = None
# 1. compute alphas, betas
if prev_timestep is None:
UpperCamelCase = t - 1
UpperCamelCase = self.alphas_cumprod[t]
UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCamelCase = 1 - alpha_prod_t
UpperCamelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCamelCase = self.betas[t]
UpperCamelCase = self.alphas[t]
else:
UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev
UpperCamelCase = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCamelCase = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
' for the UnCLIPScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCamelCase = torch.clamp(
UpperCamelCase__ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
UpperCamelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
UpperCamelCase = 0
if t > 0:
UpperCamelCase = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase__ , device=model_output.device )
UpperCamelCase = self._get_variance(
UpperCamelCase__ , predicted_variance=UpperCamelCase__ , prev_timestep=UpperCamelCase__ , )
if self.variance_type == "fixed_small_log":
UpperCamelCase = variance
elif self.variance_type == "learned_range":
UpperCamelCase = (0.5 * variance).exp()
else:
raise ValueError(
f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
' for the UnCLIPScheduler.' )
UpperCamelCase = variance * variance_noise
UpperCamelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ )
def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.IntTensor , ):
"""simple docstring"""
UpperCamelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
UpperCamelCase = timesteps.to(original_samples.device )
UpperCamelCase = alphas_cumprod[timesteps] ** 0.5
UpperCamelCase = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
UpperCamelCase = sqrt_alpha_prod.unsqueeze(-1 )
UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCamelCase = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
UpperCamelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 28 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
_UpperCamelCase = None
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_UpperCamelCase = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
_UpperCamelCase = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
_UpperCamelCase = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
a_ =VOCAB_FILES_NAMES
a_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ =PRETRAINED_VOCAB_FILES_MAP
a_ =["""input_ids""", """attention_mask"""]
a_ =MBartTokenizer
a_ =[]
a_ =[]
def __init__( self : Optional[Any] , _a : Optional[int]=None , _a : Any=None , _a : Any="<s>" , _a : Optional[Any]="</s>" , _a : List[str]="</s>" , _a : List[Any]="<s>" , _a : Union[str, Any]="<unk>" , _a : str="<pad>" , _a : Any="<mask>" , _a : Optional[Any]=None , _a : str=None , _a : Tuple=None , **_a : Dict , ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
__lowerCamelCase : List[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
vocab_file=_a , tokenizer_file=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , **_a , )
__lowerCamelCase : Optional[Any] = vocab_file
__lowerCamelCase : List[str] = False if not self.vocab_file else True
__lowerCamelCase : str = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
__lowerCamelCase : Optional[Any] = {
lang_code: self.convert_tokens_to_ids(_a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__lowerCamelCase : Optional[Any] = src_lang if src_lang is not None else 'en_XX'
__lowerCamelCase : int = self.convert_tokens_to_ids(self._src_lang )
__lowerCamelCase : List[str] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _lowercase ( self : List[Any] ) -> str:
return self._src_lang
@src_lang.setter
def _lowercase ( self : Union[str, Any] , _a : str ) -> None:
__lowerCamelCase : Union[str, Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowercase ( self : List[Any] , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowercase ( self : int , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
__lowerCamelCase : Optional[int] = [self.sep_token_id]
__lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : Optional[Any] , _a : Optional[Any] , _a : str , _a : Optional[str] , _a : Optional[str] , **_a : Optional[int] ) -> Any:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__lowerCamelCase : Optional[Any] = src_lang
__lowerCamelCase : Dict = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
__lowerCamelCase : Tuple = self.convert_tokens_to_ids(_a )
__lowerCamelCase : Optional[Any] = tgt_lang_id
return inputs
def _lowercase ( self : Any , _a : List[str] , _a : str = "en_XX" , _a : Optional[List[str]] = None , _a : str = "ro_RO" , **_a : Tuple , ) -> BatchEncoding:
__lowerCamelCase : List[Any] = src_lang
__lowerCamelCase : str = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def _lowercase ( self : List[Any] ) -> Any:
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase ( self : Dict ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowercase ( self : Tuple , _a : List[str] ) -> None:
__lowerCamelCase : Tuple = self.convert_tokens_to_ids(_a )
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
__lowerCamelCase : Dict = self.convert_ids_to_tokens(self.prefix_tokens )
__lowerCamelCase : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
__lowerCamelCase : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self : Optional[Any] , _a : str ) -> None:
__lowerCamelCase : Union[str, Any] = self.convert_tokens_to_ids(_a )
__lowerCamelCase : int = []
__lowerCamelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
__lowerCamelCase : int = self.convert_ids_to_tokens(self.prefix_tokens )
__lowerCamelCase : Any = self.convert_ids_to_tokens(self.suffix_tokens )
__lowerCamelCase : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self : Any , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
__lowerCamelCase : List[str] = os.path.join(
_a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 208 | 0 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
A_ : Any = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
A_ : Union[str, Any] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
A_ : Optional[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, homepage='https://github.com/krishnap25/mauve', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('string', id='sequence' ),
'references': datasets.Value('string', id='sequence' ),
} ), codebase_urls=['https://github.com/krishnap25/mauve'], reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
], )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_="auto", lowerCamelCase_=-1, lowerCamelCase_=0.9, lowerCamelCase_=5, lowerCamelCase_=5_0_0, lowerCamelCase_="gpt2-large", lowerCamelCase_=-1, lowerCamelCase_=1_0_2_4, lowerCamelCase_=2_5, lowerCamelCase_=5, lowerCamelCase_=True, lowerCamelCase_=2_5, ):
'''simple docstring'''
lowerCamelCase__ : List[str] = compute_mauve(
p_text=lowerCamelCase_, q_text=lowerCamelCase_, p_features=lowerCamelCase_, q_features=lowerCamelCase_, p_tokens=lowerCamelCase_, q_tokens=lowerCamelCase_, num_buckets=lowerCamelCase_, pca_max_data=lowerCamelCase_, kmeans_explained_var=lowerCamelCase_, kmeans_num_redo=lowerCamelCase_, kmeans_max_iter=lowerCamelCase_, featurize_model_name=lowerCamelCase_, device_id=lowerCamelCase_, max_text_length=lowerCamelCase_, divergence_curve_discretization_size=lowerCamelCase_, mauve_scaling_factor=lowerCamelCase_, verbose=lowerCamelCase_, seed=lowerCamelCase_, )
return out
| 316 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
while second != 0:
lowerCamelCase__ : Tuple = first & second
first ^= second
lowerCamelCase__ : int = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Tuple = int(input("Enter the first number: ").strip())
A_ : Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"{add(first, second) = }")
| 316 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
class _snake_case ( a__ ):
snake_case__ = "encoder-decoder"
snake_case__ = True
def __init__( self : int , **UpperCAmelCase : Optional[Any] ):
super().__init__(**UpperCAmelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
__lowerCamelCase : Tuple = kwargs.pop("encoder" )
__lowerCamelCase : List[Any] = encoder_config.pop("model_type" )
__lowerCamelCase : Optional[int] = kwargs.pop("decoder" )
__lowerCamelCase : List[Any] = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
__lowerCamelCase : Any = AutoConfig.for_model(UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : Tuple = AutoConfig.for_model(UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : Any = True
@classmethod
def lowerCamelCase__ ( cls : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Dict ):
logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
__lowerCamelCase : int = True
__lowerCamelCase : List[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase )
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : int = copy.deepcopy(self.__dict__ )
__lowerCamelCase : List[Any] = self.encoder.to_dict()
__lowerCamelCase : Optional[int] = self.decoder.to_dict()
__lowerCamelCase : List[Any] = self.__class__.model_type
return output | 135 |
def a ( A__ : str , A__ : bool = False ) -> str:
"""simple docstring"""
if not isinstance(A__ , A__ ):
_lowercase =F'''Expected string as input, found {type(A__ )}'''
raise ValueError(A__ )
if not isinstance(A__ , A__ ):
_lowercase =F'''Expected boolean as use_pascal parameter, found {type(A__ )}'''
raise ValueError(A__ )
_lowercase =input_str.split('_' )
_lowercase =0 if use_pascal else 1
_lowercase =words[start_index:]
_lowercase =[word[0].upper() + word[1:] for word in words_to_capitalize]
_lowercase ='' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 205 | 0 |
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def UpperCAmelCase__ (snake_case__ : Features ):
"""simple docstring"""
_snake_case : Optional[Any] = np.inf
def set_batch_size(snake_case__ : FeatureType ) -> None:
nonlocal batch_size
if isinstance(a__ , a__ ):
_snake_case : int = min(a__ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(a__ , a__ ):
_snake_case : Dict = min(a__ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(a__ , a__ ) and feature.dtype == "binary":
_snake_case : Union[str, Any] = min(a__ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(a__ , a__ )
return None if batch_size is np.inf else batch_size
class lowercase( _a ):
'''simple docstring'''
def __init__( self: Optional[int], a_: List[str], a_: Optional[int] = None, a_: List[str] = None, a_: Any = None, a_: List[Any] = False, a_: List[str] = False, a_: Optional[Any] = None, **a_: int, ):
'''simple docstring'''
super().__init__(
__lowerCAmelCase, split=__lowerCAmelCase, features=__lowerCAmelCase, cache_dir=__lowerCAmelCase, keep_in_memory=__lowerCAmelCase, streaming=__lowerCAmelCase, num_proc=__lowerCAmelCase, **__lowerCAmelCase, )
_snake_case : List[str] = path_or_paths if isinstance(__lowerCAmelCase, __lowerCAmelCase ) else {self.split: path_or_paths}
_snake_case : Optional[int] = _PACKAGED_DATASETS_MODULES["""parquet"""][1]
_snake_case : Dict = Parquet(
cache_dir=__lowerCAmelCase, data_files=__lowerCAmelCase, features=__lowerCAmelCase, hash=__lowerCAmelCase, **__lowerCAmelCase, )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
if self.streaming:
_snake_case : List[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_snake_case : int = None
_snake_case : int = None
_snake_case : Optional[Any] = None
_snake_case : List[str] = None
self.builder.download_and_prepare(
download_config=__lowerCAmelCase, download_mode=__lowerCAmelCase, verification_mode=__lowerCAmelCase, base_path=__lowerCAmelCase, num_proc=self.num_proc, )
_snake_case : str = self.builder.as_dataset(
split=self.split, verification_mode=__lowerCAmelCase, in_memory=self.keep_in_memory )
return dataset
class lowercase:
'''simple docstring'''
def __init__( self: int, a_: Dict, a_: int, a_: Optional[Any] = None, **a_: List[str], ):
'''simple docstring'''
_snake_case : str = dataset
_snake_case : int = path_or_buf
_snake_case : Optional[int] = batch_size or get_writer_batch_size(dataset.features )
_snake_case : List[Any] = parquet_writer_kwargs
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf, (str, bytes, os.PathLike) ):
with open(self.path_or_buf, """wb+""" ) as buffer:
_snake_case : Tuple = self._write(file_obj=__lowerCAmelCase, batch_size=__lowerCAmelCase, **self.parquet_writer_kwargs )
else:
_snake_case : Union[str, Any] = self._write(file_obj=self.path_or_buf, batch_size=__lowerCAmelCase, **self.parquet_writer_kwargs )
return written
def UpperCamelCase_ ( self: List[str], a_: Optional[Any], a_: Union[str, Any], **a_: List[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = 0
_snake_case : Optional[int] = parquet_writer_kwargs.pop("""path_or_buf""", __lowerCAmelCase )
_snake_case : Dict = self.dataset.features.arrow_schema
_snake_case : Tuple = pq.ParquetWriter(__lowerCAmelCase, schema=__lowerCAmelCase, **__lowerCAmelCase )
for offset in logging.tqdm(
range(0, len(self.dataset ), __lowerCAmelCase ), unit="""ba""", disable=not logging.is_progress_bar_enabled(), desc="""Creating parquet from Arrow format""", ):
_snake_case : int = query_table(
table=self.dataset._data, key=slice(__lowerCAmelCase, offset + batch_size ), indices=self.dataset._indices if self.dataset._indices is not None else None, )
writer.write_table(__lowerCAmelCase )
written += batch.nbytes
writer.close()
return written
| 371 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowercase( __a ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
class lowercase( nn.Module ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = (16, 32, 96, 2_56)
lowercase__ = jnp.floataa
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = nn.Conv(
self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
_snake_case : int = []
for i in range(len(self.block_out_channels ) - 1 ):
_snake_case : int = self.block_out_channels[i]
_snake_case : Tuple = self.block_out_channels[i + 1]
_snake_case : Dict = nn.Conv(
a_, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(a_ )
_snake_case : List[Any] = nn.Conv(
a_, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(a_ )
_snake_case : Any = blocks
_snake_case : Optional[Any] = nn.Conv(
self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self: Optional[Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : int = self.conv_in(a_ )
_snake_case : Optional[int] = nn.silu(a_ )
for block in self.blocks:
_snake_case : Tuple = block(a_ )
_snake_case : int = nn.silu(a_ )
_snake_case : Optional[int] = self.conv_out(a_ )
return embedding
@flax_register_to_config
class lowercase( nn.Module , __a , __a ):
'''simple docstring'''
lowercase__ = 32
lowercase__ = 4
lowercase__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowercase__ = False
lowercase__ = (3_20, 6_40, 12_80, 12_80)
lowercase__ = 2
lowercase__ = 8
lowercase__ = None
lowercase__ = 12_80
lowercase__ = 0.0
lowercase__ = False
lowercase__ = jnp.floataa
lowercase__ = True
lowercase__ = 0
lowercase__ = "rgb"
lowercase__ = (16, 32, 96, 2_56)
def UpperCamelCase_ ( self: int, a_: jax.random.KeyArray ):
'''simple docstring'''
_snake_case : str = (1, self.in_channels, self.sample_size, self.sample_size)
_snake_case : Optional[Any] = jnp.zeros(a_, dtype=jnp.floataa )
_snake_case : List[str] = jnp.ones((1,), dtype=jnp.intaa )
_snake_case : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa )
_snake_case : Any = (1, 3, self.sample_size * 8, self.sample_size * 8)
_snake_case : Optional[int] = jnp.zeros(a_, dtype=jnp.floataa )
_snake_case , _snake_case : Tuple = jax.random.split(a_ )
_snake_case : str = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(a_, a_, a_, a_, a_ )["params"]
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.block_out_channels
_snake_case : Optional[int] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_snake_case : int = self.num_attention_heads or self.attention_head_dim
# input
_snake_case : Union[str, Any] = nn.Conv(
block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
# time
_snake_case : int = FlaxTimesteps(
block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
_snake_case : Any = FlaxTimestepEmbedding(a_, dtype=self.dtype )
_snake_case : Optional[Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
_snake_case : List[str] = self.only_cross_attention
if isinstance(a_, a_ ):
_snake_case : List[Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(a_, a_ ):
_snake_case : Optional[Any] = (num_attention_heads,) * len(self.down_block_types )
# down
_snake_case : List[str] = []
_snake_case : Tuple = []
_snake_case : int = block_out_channels[0]
_snake_case : Optional[Any] = nn.Conv(
a_, kernel_size=(1, 1), padding="""VALID""", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(a_ )
for i, down_block_type in enumerate(self.down_block_types ):
_snake_case : List[Any] = output_channel
_snake_case : Any = block_out_channels[i]
_snake_case : List[str] = i == len(a_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_snake_case : Optional[int] = FlaxCrossAttnDownBlockaD(
in_channels=a_, out_channels=a_, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
else:
_snake_case : List[Any] = FlaxDownBlockaD(
in_channels=a_, out_channels=a_, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
down_blocks.append(a_ )
for _ in range(self.layers_per_block ):
_snake_case : List[Any] = nn.Conv(
a_, kernel_size=(1, 1), padding="""VALID""", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(a_ )
if not is_final_block:
_snake_case : List[Any] = nn.Conv(
a_, kernel_size=(1, 1), padding="""VALID""", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(a_ )
_snake_case : str = down_blocks
_snake_case : Union[str, Any] = controlnet_down_blocks
# mid
_snake_case : Tuple = block_out_channels[-1]
_snake_case : Optional[int] = FlaxUNetMidBlockaDCrossAttn(
in_channels=a_, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
_snake_case : Tuple = nn.Conv(
a_, kernel_size=(1, 1), padding="""VALID""", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self: str, a_: Any, a_: Tuple, a_: Any, a_: int, a_: float = 1.0, a_: bool = True, a_: bool = False, ):
'''simple docstring'''
_snake_case : Dict = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
_snake_case : List[Any] = jnp.flip(a_, axis=1 )
# 1. time
if not isinstance(a_, jnp.ndarray ):
_snake_case : Any = jnp.array([timesteps], dtype=jnp.intaa )
elif isinstance(a_, jnp.ndarray ) and len(timesteps.shape ) == 0:
_snake_case : Union[str, Any] = timesteps.astype(dtype=jnp.floataa )
_snake_case : List[str] = jnp.expand_dims(a_, 0 )
_snake_case : List[str] = self.time_proj(a_ )
_snake_case : str = self.time_embedding(a_ )
# 2. pre-process
_snake_case : List[str] = jnp.transpose(a_, (0, 2, 3, 1) )
_snake_case : List[Any] = self.conv_in(a_ )
_snake_case : Union[str, Any] = jnp.transpose(a_, (0, 2, 3, 1) )
_snake_case : Any = self.controlnet_cond_embedding(a_ )
sample += controlnet_cond
# 3. down
_snake_case : List[str] = (sample,)
for down_block in self.down_blocks:
if isinstance(a_, a_ ):
_snake_case , _snake_case : Optional[Any] = down_block(a_, a_, a_, deterministic=not train )
else:
_snake_case , _snake_case : Dict = down_block(a_, a_, deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
_snake_case : Dict = self.mid_block(a_, a_, a_, deterministic=not train )
# 5. contronet blocks
_snake_case : Tuple = ()
for down_block_res_sample, controlnet_block in zip(a_, self.controlnet_down_blocks ):
_snake_case : Any = controlnet_block(a_ )
controlnet_down_block_res_samples += (down_block_res_sample,)
_snake_case : List[Any] = controlnet_down_block_res_samples
_snake_case : int = self.controlnet_mid_block(a_ )
# 6. scaling
_snake_case : int = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=a_, mid_block_res_sample=a_ )
| 132 | 0 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
a =os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
a ='sshleifer/student_marian_en_ro_6_1'
a ='sshleifer/tiny-mbart'
@require_torch
class A_ ( SCREAMING_SNAKE_CASE ):
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Tuple=False ,SCREAMING_SNAKE_CASE__ : int=None ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Any=True ,):
__lowerCamelCase : Any = self.run_trainer(
eval_steps=1 ,max_len=1_2 ,model_name=_snake_case ,num_train_epochs=1 ,distributed=_snake_case ,extra_args_str=_snake_case ,predict_with_generate=_snake_case ,do_train=_snake_case ,do_eval=_snake_case ,do_predict=_snake_case ,)
__lowerCamelCase : List[str] = TrainerState.load_from_json(os.path.join(_snake_case ,'trainer_state.json')).log_history
if not do_eval:
return
__lowerCamelCase : Tuple = [log for log in logs if 'eval_loss' in log.keys()]
__lowerCamelCase : Tuple = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__lowerCamelCase : Union[str, Any] = eval_metrics[-1]
assert isinstance(last_step_stats['eval_bleu'] ,_snake_case)
assert not math.isnan(float(last_step_stats['eval_loss'])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCAmelCase ( self : Any):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowerCAmelCase ( self : str):
self.run_seqaseq_quick(distributed=_snake_case)
@require_torch_multi_gpu
def lowerCAmelCase ( self : Optional[Any]):
self.run_seqaseq_quick(distributed=_snake_case)
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : List[Any]):
self.run_seqaseq_quick(distributed=_snake_case ,extra_args_str='--sharded_ddp simple')
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Optional[Any]):
self.run_seqaseq_quick(distributed=_snake_case ,extra_args_str='--sharded_ddp simple --fp16')
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : List[Any]):
self.run_seqaseq_quick(distributed=_snake_case ,extra_args_str='--sharded_ddp zero_dp_2' ,predict_with_generate=_snake_case)
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Tuple):
self.run_seqaseq_quick(
distributed=_snake_case ,extra_args_str='--sharded_ddp zero_dp_2 --fp16' ,predict_with_generate=_snake_case)
@require_apex
@require_torch_gpu
def lowerCAmelCase ( self : str):
self.run_seqaseq_quick(distributed=_snake_case ,extra_args_str='--fp16 --fp16_backend=apex')
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_snake_case ,extra_args_str='--fp16 --fp16_backend=apex')
@parameterized.expand(['base', 'low', 'high', 'mixed'])
@require_torch_multi_gpu
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Tuple):
__lowerCamelCase : int = {
# test with the default log_level - should be info and thus log info once
'base': {'extra_args_str': '', 'n_matches': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
}
__lowerCamelCase : str = experiments[experiment_id]
__lowerCamelCase : int = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
__lowerCamelCase : Union[str, Any] = 'Running training'
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_snake_case ,extra_args_str=data['extra_args_str'])
__lowerCamelCase : List[Any] = len(re.findall(_snake_case ,cl.err))
self.assertEqual(_snake_case ,data['n_matches'])
@slow
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : str = self.run_trainer(
eval_steps=2 ,max_len=1_2_8 ,model_name=_snake_case ,learning_rate=3E-4 ,num_train_epochs=1_0 ,distributed=_snake_case ,)
# Check metrics
__lowerCamelCase : Optional[int] = TrainerState.load_from_json(os.path.join(_snake_case ,'trainer_state.json')).log_history
__lowerCamelCase : Dict = [log for log in logs if 'eval_loss' in log.keys()]
__lowerCamelCase : Optional[Any] = eval_metrics[0]
__lowerCamelCase : Any = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['eval_bleu'] ,_snake_case)
# test if do_predict saves generations and metrics
__lowerCamelCase : Union[str, Any] = os.listdir(_snake_case)
__lowerCamelCase : List[str] = {os.path.basename(_snake_case) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowerCAmelCase ( self : List[Any]):
from transformers.training_args import OptimizerNames
def train_and_return_metrics(SCREAMING_SNAKE_CASE__ : Any) -> Tuple[int, float]:
__lowerCamelCase : List[str] = '--skip_memory_metrics 0'
__lowerCamelCase : Dict = self.run_trainer(
max_len=1_2_8 ,model_name=_snake_case ,learning_rate=3E-4 ,num_train_epochs=1 ,optim=_snake_case ,distributed=_snake_case ,extra_args_str=_snake_case ,do_eval=_snake_case ,do_predict=_snake_case ,n_gpus_to_use=1 ,)
# Check metrics
__lowerCamelCase : Optional[Any] = TrainerState.load_from_json(Path(_snake_case ,'trainer_state.json')).log_history
__lowerCamelCase : str = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**2_0)
__lowerCamelCase : Any = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**2_0)
__lowerCamelCase : Union[str, Any] = logs[0]['train_loss']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
__lowerCamelCase : Optional[int] = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
__lowerCamelCase : List[str] = gpu_peak_mem_orig + gpu_alloc_mem_orig
__lowerCamelCase : Tuple = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
__lowerCamelCase : Tuple = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
__lowerCamelCase : Any = 1_2_0
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
_snake_case ,_snake_case ,'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" ,)
self.assertGreater(
_snake_case ,_snake_case ,'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" ,)
self.assertEqual(
_snake_case ,_snake_case ,F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Tuple = 3E-3 ,SCREAMING_SNAKE_CASE__ : Optional[int] = "adafactor" ,SCREAMING_SNAKE_CASE__ : Optional[int] = False ,SCREAMING_SNAKE_CASE__ : str = None ,SCREAMING_SNAKE_CASE__ : Dict = 0 ,SCREAMING_SNAKE_CASE__ : str = True ,SCREAMING_SNAKE_CASE__ : Tuple = True ,SCREAMING_SNAKE_CASE__ : Dict = True ,SCREAMING_SNAKE_CASE__ : List[str] = True ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,):
__lowerCamelCase : Dict = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
__lowerCamelCase : Dict = self.get_auto_remove_tmp_dir()
__lowerCamelCase : Union[str, Any] = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(_snake_case)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(_snake_case)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
__lowerCamelCase : List[str] = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(_snake_case)}\n ".split()
__lowerCamelCase : int = '\n --do_predict\n '.split()
__lowerCamelCase : Tuple = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
__lowerCamelCase : Any = get_gpu_count()
__lowerCamelCase : Tuple = get_torch_dist_unique_port()
__lowerCamelCase : List[Any] = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
__lowerCamelCase : Dict = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_snake_case ,env=self.get_env())
else:
__lowerCamelCase : Union[str, Any] = ['run_translation.py'] + args
with patch.object(_snake_case ,'argv' ,_snake_case):
main()
return output_dir
| 73 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
A : str = logging.get_logger(__name__)
class __A( a ):
def __init__( self , **_snake_case ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''bs4'''] )
super().__init__(**_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
'''simple docstring'''
__a = []
__a = []
__a = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
__a = parent.find_all(child.name , recursive=_snake_case )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_snake_case ) else next(i for i, s in enumerate(_snake_case , 1 ) if s is child ) )
__a = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = BeautifulSoup(_snake_case , '''html.parser''' )
__a = []
__a = []
__a = []
for element in html_code.descendants:
if type(_snake_case ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
__a = html.unescape(_snake_case ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_snake_case )
__a , __a = self.xpath_soup(_snake_case )
stringaxtag_seq.append(_snake_case )
stringaxsubs_seq.append(_snake_case )
if len(_snake_case ) != len(_snake_case ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(_snake_case ) != len(_snake_case ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = ''''''
for tagname, subs in zip(_snake_case , _snake_case ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self , _snake_case ) -> BatchFeature:
'''simple docstring'''
__a = False
# Check that strings has a valid type
if isinstance(_snake_case , _snake_case ):
__a = True
elif isinstance(_snake_case , (list, tuple) ):
if len(_snake_case ) == 0 or isinstance(html_strings[0] , _snake_case ):
__a = True
if not valid_strings:
raise ValueError(
'''HTML strings must of type `str`, `List[str]` (batch of examples), '''
F"""but is of type {type(_snake_case )}.""" )
__a = bool(isinstance(_snake_case , (list, tuple) ) and (isinstance(html_strings[0] , _snake_case )) )
if not is_batched:
__a = [html_strings]
# Get nodes + xpaths
__a = []
__a = []
for html_string in html_strings:
__a , __a , __a = self.get_three_from_single(_snake_case )
nodes.append(_snake_case )
__a = []
for node, tag_list, sub_list in zip(_snake_case , _snake_case , _snake_case ):
__a = self.construct_xpath(_snake_case , _snake_case )
xpath_strings.append(_snake_case )
xpaths.append(_snake_case )
# return as Dict
__a = {'''nodes''': nodes, '''xpaths''': xpaths}
__a = BatchFeature(data=_snake_case , tensor_type=_snake_case )
return encoded_inputs | 6 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ = random.Random()
def lowercase__ ( __UpperCamelCase , __UpperCamelCase=1.0 , __UpperCamelCase=None , __UpperCamelCase=None )-> Any:
if rng is None:
UpperCamelCase = global_rng
UpperCamelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class a_ ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=2000 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=16000 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=80 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE="hann_window" , _SCREAMING_SNAKE_CASE=80 , _SCREAMING_SNAKE_CASE=7600 , _SCREAMING_SNAKE_CASE=1e-10 , _SCREAMING_SNAKE_CASE=True , ) -> Tuple:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = min_seq_length
UpperCamelCase = max_seq_length
UpperCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase = feature_size
UpperCamelCase = padding_value
UpperCamelCase = sampling_rate
UpperCamelCase = do_normalize
UpperCamelCase = num_mel_bins
UpperCamelCase = hop_length
UpperCamelCase = win_length
UpperCamelCase = win_function
UpperCamelCase = fmin
UpperCamelCase = fmax
UpperCamelCase = mel_floor
UpperCamelCase = return_attention_mask
def A__ ( self ) -> str:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def A__ ( self , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ) -> List[str]:
"""simple docstring"""
def _flatten(_SCREAMING_SNAKE_CASE ):
return list(itertools.chain(*_SCREAMING_SNAKE_CASE ) )
if equal_length:
UpperCamelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCamelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCamelCase = [np.asarray(_SCREAMING_SNAKE_CASE ) for x in speech_inputs]
return speech_inputs
def A__ ( self , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
"""simple docstring"""
if equal_length:
UpperCamelCase = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCamelCase = [np.asarray(_SCREAMING_SNAKE_CASE ) for x in speech_inputs]
return speech_inputs
@require_torch
class a_ ( lowerCamelCase , unittest.TestCase ):
lowercase = SpeechTaFeatureExtractor
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = SpeechTaFeatureExtractionTester(self )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
self.assertTrue(np.all(np.mean(_SCREAMING_SNAKE_CASE , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_SCREAMING_SNAKE_CASE , axis=0 ) - 1 ) < 1e-3 ) )
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCamelCase = [np.asarray(_SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
UpperCamelCase = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test batched
UpperCamelCase = feat_extract(_SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_values
UpperCamelCase = feat_extract(_SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCamelCase = ["""longest""", """max_length""", """do_not_pad"""]
UpperCamelCase = [None, 1600, None]
for max_length, padding in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = feat_extract(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = range(800 , 1400 , 200 )
UpperCamelCase = [floats_list((1, x) )[0] for x in lengths]
UpperCamelCase = ["""longest""", """max_length""", """do_not_pad"""]
UpperCamelCase = [None, 1600, None]
for max_length, padding in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = feat_extract(_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE )
UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCamelCase = feat_extract(
_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=1000 , padding="""max_length""" , return_tensors="""np""" )
UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCamelCase = feat_extract(
_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=1000 , padding="""longest""" , return_tensors="""np""" )
UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCamelCase = feat_extract(
_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=2000 , padding="""longest""" , return_tensors="""np""" )
UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase = np.random.rand(100 ).astype(np.floataa )
UpperCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCamelCase = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCamelCase = [np.asarray(_SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase = feature_extractor(audio_target=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
UpperCamelCase = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values
UpperCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test batched
UpperCamelCase = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_values
UpperCamelCase = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase = np.asarray(_SCREAMING_SNAKE_CASE )
UpperCamelCase = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_values
UpperCamelCase = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) for x, y in zip(_SCREAMING_SNAKE_CASE , processed_features[input_name] ) ) )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_SCREAMING_SNAKE_CASE )
UpperCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
UpperCamelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCamelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
UpperCamelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCamelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
UpperCamelCase = feat_extract.num_mel_bins # hack!
UpperCamelCase = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding="""longest""" , return_tensors="""np""" )[input_name]
UpperCamelCase = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.feat_extract_dict
UpperCamelCase = True
UpperCamelCase = self.feature_extraction_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCamelCase = [len(_SCREAMING_SNAKE_CASE ) for x in speech_inputs]
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
UpperCamelCase = feat_extract.num_mel_bins # hack!
UpperCamelCase = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _SCREAMING_SNAKE_CASE )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.feat_extract_dict
UpperCamelCase = True
UpperCamelCase = self.feature_extraction_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCamelCase = [len(_SCREAMING_SNAKE_CASE ) for x in speech_inputs]
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
UpperCamelCase = min(_SCREAMING_SNAKE_CASE )
UpperCamelCase = feat_extract.num_mel_bins # hack!
UpperCamelCase = feat_extract.pad(
_SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
UpperCamelCase = ds.sort("""id""" ).select(range(_SCREAMING_SNAKE_CASE ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = torch.tensor(
[2.38_04e-03, 2.07_52e-03, 1.98_36e-03, 2.10_57e-03, 1.61_74e-03,
3.05_18e-04, 9.15_53e-05, 3.35_69e-04, 9.76_56e-04, 1.83_11e-03,
2.01_42e-03, 2.10_57e-03, 1.73_95e-03, 4.57_76e-04, -3.96_73e-04,
4.57_76e-04, 1.00_71e-03, 9.15_53e-05, 4.88_28e-04, 1.15_97e-03,
7.32_42e-04, 9.46_04e-04, 1.80_05e-03, 1.83_11e-03, 8.85_01e-04,
4.27_25e-04, 4.88_28e-04, 7.32_42e-04, 1.09_86e-03, 2.10_57e-03] )
# fmt: on
UpperCamelCase = self._load_datasamples(1 )
UpperCamelCase = SpeechTaFeatureExtractor()
UpperCamelCase = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 93680) )
self.assertTrue(torch.allclose(input_values[0, :30] , _SCREAMING_SNAKE_CASE , atol=1e-6 ) )
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
# fmt: on
UpperCamelCase = self._load_datasamples(1 )
UpperCamelCase = SpeechTaFeatureExtractor()
UpperCamelCase = feature_extractor(audio_target=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 183 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a_ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[32, 64, 128] , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2"] , _SCREAMING_SNAKE_CASE=[1, 2] , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = embed_dim
UpperCamelCase = hidden_sizes
UpperCamelCase = depths
UpperCamelCase = num_heads
UpperCamelCase = window_size
UpperCamelCase = mlp_ratio
UpperCamelCase = qkv_bias
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = drop_path_rate
UpperCamelCase = hidden_act
UpperCamelCase = use_absolute_embeddings
UpperCamelCase = patch_norm
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = is_training
UpperCamelCase = scope
UpperCamelCase = use_labels
UpperCamelCase = type_sequence_label_size
UpperCamelCase = encoder_stride
UpperCamelCase = out_features
UpperCamelCase = out_indices
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> str:
"""simple docstring"""
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase = FocalNetModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
UpperCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = FocalNetBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCamelCase = None
UpperCamelCase = FocalNetBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = FocalNetForMaskedImageModeling(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = FocalNetForMaskedImageModeling(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.type_sequence_label_size
UpperCamelCase = FocalNetForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = FocalNetForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = config_and_inputs
UpperCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowercase = (
{"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = FocalNetModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 , has_text_modality=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> Tuple:
"""simple docstring"""
return
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def A__ ( self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def A__ ( self ) -> int:
"""simple docstring"""
pass
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase = outputs.hidden_states
UpperCamelCase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# FocalNet has a different seq_length
UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCamelCase = outputs.reshaped_hidden_states
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = reshaped_hidden_states[0].shape
UpperCamelCase = (
reshaped_hidden_states[0].view(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@slow
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = FocalNetModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = _config_zero_init(_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(config=_SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class a_ ( unittest.TestCase ):
@cached_property
def A__ ( self ) -> List[str]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.default_image_processor
UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCamelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class a_ ( lowerCamelCase , unittest.TestCase ):
lowercase = (FocalNetBackbone,) if is_torch_available() else ()
lowercase = FocalNetConfig
lowercase = False
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = FocalNetModelTester(self )
| 183 | 1 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = str(lowerCAmelCase_ )
return n == n[::-1]
def UpperCAmelCase__ (lowerCAmelCase_ = 100_0000 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
for i in range(1 , lowerCAmelCase_ ):
if is_palindrome(lowerCAmelCase_ ) and is_palindrome(bin(lowerCAmelCase_ ).split("b" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 54 | import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''num_attention_heads''' ) )
class _UpperCamelCase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=64 , lowerCAmelCase__=3 , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=16 , lowerCAmelCase__=[1_28, 2_56, 3_84] , lowerCAmelCase__=[4, 6, 8] , lowerCAmelCase__=[2, 3, 4] , lowerCAmelCase__=[16, 16, 16] , lowerCAmelCase__=0 , lowerCAmelCase__=[2, 2, 2] , lowerCAmelCase__=[2, 2, 2] , lowerCAmelCase__=0.02 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=2 , ) -> Tuple:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = kernel_size
__lowercase = stride
__lowercase = padding
__lowercase = hidden_sizes
__lowercase = num_attention_heads
__lowercase = depths
__lowercase = key_dim
__lowercase = drop_path_rate
__lowercase = patch_size
__lowercase = attention_ratio
__lowercase = mlp_ratio
__lowercase = initializer_range
__lowercase = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
__lowercase = is_training
__lowercase = use_labels
__lowercase = num_labels
__lowercase = initializer_range
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
__lowercase = LevitModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__lowercase = model(lowerCAmelCase__ )
__lowercase = (self.image_size, self.image_size)
__lowercase , __lowercase = image_size[0], image_size[1]
for _ in range(4 ):
__lowercase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
__lowercase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = LevitForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__lowercase = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
__a : int = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
__a : List[str] = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__a : int = False
__a : Dict = False
__a : Optional[Any] = False
__a : Optional[int] = False
__a : Dict = False
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = LevitModelTester(self )
__lowercase = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowerCAmelCase__ )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
__lowercase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__lowercase = outputs.hidden_states
__lowercase = len(self.model_tester.depths ) + 1
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__lowercase = (self.model_tester.image_size, self.model_tester.image_size)
__lowercase , __lowercase = image_size[0], image_size[1]
for _ in range(4 ):
__lowercase = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
__lowercase = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> str:
'''simple docstring'''
__lowercase = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCAmelCase__ )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
__lowercase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
__lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
__lowercase = model(**lowerCAmelCase__ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__lowercase = False
__lowercase = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCAmelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
__lowercase = model_class(lowerCAmelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCAmelCase__ )
model.train()
__lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
__lowercase = model(**lowerCAmelCase__ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCAmelCase__ ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
__lowercase = problem_type['''title''']
__lowercase = problem_type['''num_labels''']
__lowercase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
__lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if problem_type["num_labels"] > 1:
__lowercase = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
__lowercase = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCAmelCase__ ) as warning_list:
__lowercase = model(**lowerCAmelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = LevitModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def UpperCAmelCase ( ):
"""simple docstring"""
__lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCAmelCase__ )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__lowercase = model(**lowerCAmelCase__ )
# verify the logits
__lowercase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__lowercase = torch.tensor([1.0448, -0.3745, -1.8317] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) ) | 210 | 0 |
'''simple docstring'''
def __lowerCamelCase ( A__ , A__ ) -> bool:
"""simple docstring"""
UpperCamelCase = len(A__ )
UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
UpperCamelCase = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
UpperCamelCase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
UpperCamelCase = subset[i - 1][j]
if arr[i - 1] <= j:
UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 249 |
'''simple docstring'''
def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ , A__ ) -> int:
"""simple docstring"""
if index == r:
for j in range(A__ ):
print(data[j] , end=' ' )
print(' ' )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
UpperCamelCase = arr[i]
combination_util(A__ , A__ , A__ , index + 1 , A__ , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(A__ , A__ , A__ , A__ , A__ , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def __lowerCamelCase ( A__ , A__ , A__ ) -> Union[str, Any]:
"""simple docstring"""
# A temporary array to store all combination one by one
UpperCamelCase = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(A__ , A__ , A__ , 0 , A__ , 0 )
if __name__ == "__main__":
# Driver code to check the function above
_lowerCamelCase : Optional[Any] = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 249 | 1 |
from itertools import permutations
def lowerCamelCase_ ( UpperCamelCase__ : tuple ) -> bool:
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__lowerCamelCase = [7, 11, 13, 17]
for i, test in enumerate(UpperCamelCase__ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowerCamelCase_ ( UpperCamelCase__ : int = 10 ) -> int:
"""simple docstring"""
return sum(
int(''.join(map(UpperCamelCase__ , UpperCamelCase__ ) ) )
for num in permutations(range(UpperCamelCase__ ) )
if is_substring_divisible(UpperCamelCase__ ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 90 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 96 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowercase_ = TypeVar('T')
class A_ ( Generic[T] ):
'''simple docstring'''
def __init__( self: int , a: T ):
__lowerCamelCase : List[str] = data
__lowerCamelCase : Node[T] | None = None
def __str__( self: Tuple ):
return F'{self.data}'
class A_ ( Generic[T] ):
'''simple docstring'''
def __init__( self: Any ):
__lowerCamelCase : Node[T] | None = None
def __iter__( self: Tuple ):
__lowerCamelCase : Dict = self.top
while node:
yield node.data
__lowerCamelCase : str = node.next
def __str__( self: Any ):
return "->".join([str(a ) for item in self] )
def __len__( self: Any ):
return len(tuple(iter(self ) ) )
def _snake_case ( self: List[str] ):
return self.top is None
def _snake_case ( self: List[Any] , a: T ):
__lowerCamelCase : Union[str, Any] = Node(a )
if not self.is_empty():
__lowerCamelCase : Union[str, Any] = self.top
__lowerCamelCase : Optional[int] = node
def _snake_case ( self: Tuple ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , a )
__lowerCamelCase : Tuple = self.top
__lowerCamelCase : Optional[Any] = self.top.next
return pop_node.data
def _snake_case ( self: int ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def _snake_case ( self: Tuple ):
__lowerCamelCase : Tuple = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 351 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowercase_ = logging.get_logger(__name__)
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: List[str] , *a: List[Any] , **a: Optional[Any] ):
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' , a , )
super().__init__(*a , **a )
| 194 | 0 |
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__SCREAMING_SNAKE_CASE : str = str(bin(lowercase__ ) )[2:] # remove the leading "0b"
__SCREAMING_SNAKE_CASE : Union[str, Any] = str(bin(lowercase__ ) )[2:] # remove the leading "0b"
__SCREAMING_SNAKE_CASE : List[Any] = max(len(lowercase__ ) , len(lowercase__ ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(lowercase__ ) , b_binary.zfill(lowercase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : int
def lowerCAmelCase (__A):
"""simple docstring"""
if not isinstance(__A , __A):
raise TypeError('''The parameter s type must be str.''')
return [s[i:] + s[:i] for i in range(len(__A))]
def lowerCAmelCase (__A):
"""simple docstring"""
if not isinstance(__A , __A):
raise TypeError('''The parameter s type must be str.''')
if not s:
raise ValueError('''The parameter s must not be empty.''')
_a = all_rotations(__A)
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_a = {
"bwt_string": "".join([word[-1] for word in rotations]),
"idx_original_string": rotations.index(__A),
}
return response
def lowerCAmelCase (__A , __A):
"""simple docstring"""
if not isinstance(__A , __A):
raise TypeError('''The parameter bwt_string type must be str.''')
if not bwt_string:
raise ValueError('''The parameter bwt_string must not be empty.''')
try:
_a = int(__A)
except ValueError:
raise TypeError(
'''The parameter idx_original_string type must be int or passive'''
''' of cast to int.''')
if idx_original_string < 0:
raise ValueError('''The parameter idx_original_string must not be lower than 0.''')
if idx_original_string >= len(__A):
raise ValueError(
'''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''')
_a = [''''''] * len(__A)
for _ in range(len(__A)):
for i in range(len(__A)):
_a = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
lowercase_ = "Provide a string that I will generate its BWT transform: "
lowercase_ = input(entry_msg).strip()
lowercase_ = bwt_transform(s)
print(
F"""Burrows Wheeler transform for string '{s}' results """
F"""in '{result['bwt_string']}'"""
)
lowercase_ = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
F"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
F"""we get original string '{original_string}'"""
)
| 211 | 0 |
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
UpperCAmelCase =(
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
UpperCAmelCase =(
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
UpperCAmelCase =(
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
UpperCAmelCase =(
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
UpperCAmelCase =(
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
UpperCAmelCase =(
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
UpperCAmelCase =(
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def _A ( ):
"""simple docstring"""
A , A = randrange(len(_a ) ), randrange(len(_a ) )
A = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
A , A = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def _A ( _a : int = 1_0_0 ):
"""simple docstring"""
return (generate_random_hand() for _ in range(_a ))
@pytest.mark.parametrize("""hand, expected""" , _a )
def _A ( _a : Optional[int] , _a : Optional[Any] ):
"""simple docstring"""
assert PokerHand(_a )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , _a )
def _A ( _a : List[str] , _a : Any ):
"""simple docstring"""
assert PokerHand(_a )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , _a )
def _A ( _a : List[str] , _a : Tuple , _a : Optional[Any] ):
"""simple docstring"""
A = PokerHand(_a )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , _a )
def _A ( _a : List[str] , _a : Any ):
"""simple docstring"""
assert PokerHand(_a )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , _a )
def _A ( _a : int , _a : Optional[Any] ):
"""simple docstring"""
assert PokerHand(_a )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , _a )
def _A ( _a : Optional[int] , _a : List[str] , _a : str ):
"""simple docstring"""
assert PokerHand(_a ).compare_with(PokerHand(_a ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def _A ( _a : Optional[Any] , _a : Optional[int] , _a : str ):
"""simple docstring"""
assert PokerHand(_a ).compare_with(PokerHand(_a ) ) == expected
def _A ( ):
"""simple docstring"""
A = [PokerHand(_a ) for hand in SORTED_HANDS]
A = poker_hands.copy()
shuffle(_a )
A = chain(sorted(_a ) )
for index, hand in enumerate(_a ):
assert hand == poker_hands[index]
def _A ( ):
"""simple docstring"""
A = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=_a )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _A ( ):
"""simple docstring"""
A = PokerHand("""2C 4S AS 3D 5C""" )
A = True
A = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def _A ( ):
"""simple docstring"""
A = 0
A = os.path.abspath(os.path.dirname(_a ) )
A = os.path.join(_a , """poker_hands.txt""" )
with open(_a ) as file_hand:
for line in file_hand:
A = line[:1_4].strip()
A = line[1_5:].strip()
A , A = PokerHand(_a ), PokerHand(_a )
A = player.compare_with(_a )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 368 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def _A ( _a : float ):
"""simple docstring"""
if num <= 0:
raise ValueError("""math domain error""" )
return quad(_a , 0 , _a , args=(_a) )[0]
def _A ( _a : float , _a : float ):
"""simple docstring"""
return math.pow(_a , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 77 | 0 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : Tuple , a_ : Optional[int] , a_ : Optional[Any] , a_ : bool = True , a_ : bool = False ):
'''simple docstring'''
__UpperCAmelCase : Any = scheduler
__UpperCAmelCase : List[str] = optimizers if isinstance(lowerCAmelCase__ , (list, tuple) ) else [optimizers]
__UpperCAmelCase : str = split_batches
__UpperCAmelCase : Optional[Any] = step_with_optimizer
__UpperCAmelCase : Dict = GradientState()
def snake_case__ ( self : Union[str, Any] , *a_ : Tuple , **a_ : List[Any] ):
'''simple docstring'''
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*lowerCAmelCase__ , **lowerCAmelCase__ )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*lowerCAmelCase__ , **lowerCAmelCase__ )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
__UpperCAmelCase : Union[str, Any] = AcceleratorState().num_processes
for _ in range(lowerCAmelCase__ ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*lowerCAmelCase__ , **lowerCAmelCase__ )
else:
self.scheduler.step(*lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : str ):
'''simple docstring'''
return self.scheduler.get_last_lr()
def snake_case__ ( self : Dict ):
'''simple docstring'''
return self.scheduler.state_dict()
def snake_case__ ( self : Dict , a_ : Optional[Any] ):
'''simple docstring'''
self.scheduler.load_state_dict(lowerCAmelCase__ )
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
return self.scheduler.get_lr()
def snake_case__ ( self : Optional[int] , *a_ : List[str] , **a_ : Optional[int] ):
'''simple docstring'''
return self.scheduler.print_lr(*lowerCAmelCase__ , **lowerCAmelCase__ )
| 226 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase_ )
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : str = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_UpperCAmelCase : ClassVar[Features] = Features({'''audio''': Audio()} )
_UpperCAmelCase : ClassVar[Features] = Features({'''transcription''': Value('''string''' )} )
_UpperCAmelCase : str = "audio"
_UpperCAmelCase : str = "transcription"
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : int):
if self.audio_column not in features:
raise ValueError(F"Column {self.audio_column} is not present in features.")
if not isinstance(features[self.audio_column] , lowerCAmelCase__):
raise ValueError(F"Column {self.audio_column} is not an Audio type.")
SCREAMING_SNAKE_CASE_: Tuple = copy.deepcopy(self)
SCREAMING_SNAKE_CASE_: Optional[int] = self.input_schema.copy()
SCREAMING_SNAKE_CASE_: Dict = features[self.audio_column]
SCREAMING_SNAKE_CASE_: int = input_schema
return task_template
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 13 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCamelCase_ = TypeVar('''T''')
lowerCamelCase_ = TypeVar('''U''')
class __A( Generic[T, U] ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = key
UpperCamelCase__ = val
UpperCamelCase__ = None
UpperCamelCase__ = None
def __repr__(self ):
return (
F"Node: key: {self.key}, val: {self.val}, "
F"has next: {bool(self.next )}, has prev: {bool(self.prev )}"
)
class __A( Generic[T, U] ):
"""simple docstring"""
def __init__(self ):
UpperCamelCase__ = DoubleLinkedListNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = DoubleLinkedListNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ , UpperCamelCase__ = self.rear, self.head
def __repr__(self ):
UpperCamelCase__ = ["""DoubleLinkedList"""]
UpperCamelCase__ = self.head
while node.next is not None:
rep.append(str(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = node.next
rep.append(str(self.rear ) )
return ",\n ".join(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
UpperCamelCase__ = node
UpperCamelCase__ = previous
UpperCamelCase__ = node
UpperCamelCase__ = self.rear
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
if node.prev is None or node.next is None:
return None
UpperCamelCase__ = node.next
UpperCamelCase__ = node.prev
UpperCamelCase__ = None
UpperCamelCase__ = None
return node
class __A( Generic[T, U] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {}
def __init__(self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = DoubleLinkedList()
UpperCamelCase__ = capacity
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = {}
def __repr__(self ):
return (
F"CacheInfo(hits={self.hits}, misses={self.miss}, "
F"capacity={self.capacity}, current size={self.num_keys})"
)
def __contains__(self , SCREAMING_SNAKE_CASE_ ):
return key in self.cache
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
UpperCamelCase__ = self.cache[key]
UpperCamelCase__ = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(SCREAMING_SNAKE_CASE_ )
return node.val
self.miss += 1
return None
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
UpperCamelCase__ = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(SCREAMING_SNAKE_CASE_ ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
UpperCamelCase__ = DoubleLinkedListNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
UpperCamelCase__ = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
UpperCamelCase__ = value
self.list.add(SCREAMING_SNAKE_CASE_ )
@classmethod
def UpperCAmelCase_ (cls , SCREAMING_SNAKE_CASE_ = 1_28 ):
def cache_decorator_inner(SCREAMING_SNAKE_CASE_ ) -> Callable[..., U]:
def cache_decorator_wrapper(*SCREAMING_SNAKE_CASE_ ) -> U:
if func not in cls.decorator_function_to_instance_map:
UpperCamelCase__ = LRUCache(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
UpperCamelCase__ = func(*SCREAMING_SNAKE_CASE_ )
cls.decorator_function_to_instance_map[func].put(args[0] , SCREAMING_SNAKE_CASE_ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(SCREAMING_SNAKE_CASE_ , """cache_info""" , SCREAMING_SNAKE_CASE_ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
from PIL import Image
def __magic_name__ ( __a : Image ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = image.size
UpperCamelCase__ = 0
UpperCamelCase__ = image.load()
for i in range(__a ):
for j in range(__a ):
UpperCamelCase__ = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(__a ):
for i in range(__a ):
UpperCamelCase__ = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
lowerCamelCase_ = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
image.save('''output_image_path''')
| 178 | 0 |
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__lowerCAmelCase = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
__lowerCAmelCase = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=False ) -> Any:
_a , _a : Union[str, Any] = create_model(
'HTSAT-tiny' , 'roberta' , lowerCAmelCase_ , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=lowerCAmelCase_ , fusion_type='aff_2d' if enable_fusion else None , )
return model, model_cfg
def __lowerCamelCase ( lowerCAmelCase_ ) -> int:
_a : Optional[int] = {}
_a : Tuple = r'.*sequential.(\d+).*'
_a : int = r'.*_projection.(\d+).*'
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_a : Union[str, Any] = key.replace(lowerCAmelCase_ , lowerCAmelCase_ )
if re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
# replace sequential layers with list
_a : List[str] = re.match(lowerCAmelCase_ , lowerCAmelCase_ ).group(1 )
_a : Optional[Any] = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(lowerCAmelCase_ )//3}.linear.""" )
elif re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
_a : str = int(re.match(lowerCAmelCase_ , lowerCAmelCase_ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
_a : Optional[Any] = 1 if projecton_layer == 0 else 2
_a : int = key.replace(f"""_projection.{projecton_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
_a : str = value
_a : List[str] = mixed_qkv.size(0 ) // 3
_a : str = mixed_qkv[:qkv_dim]
_a : int = mixed_qkv[qkv_dim : qkv_dim * 2]
_a : Any = mixed_qkv[qkv_dim * 2 :]
_a : List[Any] = query_layer
_a : Union[str, Any] = key_layer
_a : Tuple = value_layer
else:
_a : Dict = value
return model_state_dict
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int:
_a , _a : Optional[Any] = init_clap(lowerCAmelCase_ , enable_fusion=lowerCAmelCase_ )
clap_model.eval()
_a : Tuple = clap_model.state_dict()
_a : Optional[int] = rename_state_dict(lowerCAmelCase_ )
_a : List[str] = ClapConfig()
_a : Tuple = enable_fusion
_a : int = ClapModel(lowerCAmelCase_ )
# ignore the spectrogram embedding layer
model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
transformers_config.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
__lowerCAmelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 89 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowercase ( self : Optional[Any] ):
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = BlipImageProcessor()
lowerCAmelCase = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
lowerCAmelCase = BlipProcessor(lowerCAmelCase , lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __lowercase ( self : Optional[Any] , **lowerCAmelCase : Tuple ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase ).tokenizer
def __lowercase ( self : List[Any] , **lowerCAmelCase : Optional[Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase ).image_processor
def __lowercase ( self : Dict ):
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : str ):
lowerCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase = [Image.fromarray(np.moveaxis(lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self : List[str] ):
lowerCAmelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase = self.get_image_processor(do_normalize=lowerCAmelCase , padding_value=1.0 )
lowerCAmelCase = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase )
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = BlipProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(lowerCAmelCase , return_tensors="""np""" )
lowerCAmelCase = processor(images=lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self : Tuple ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = BlipProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = processor(text=lowerCAmelCase )
lowerCAmelCase = tokenizer(lowerCAmelCase , return_token_type_ids=lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = BlipProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=lowerCAmelCase , images=lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase ):
processor()
def __lowercase ( self : List[Any] ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = BlipProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.batch_decode(lowerCAmelCase )
lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = BlipProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=lowerCAmelCase , images=lowerCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 155 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__A : Any = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = ['DPTFeatureExtractor']
__A : Any = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 8 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
__A : int = logging.getLogger()
def UpperCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
snake_case_ : int = parser.parse_args()
return args.f
def UpperCAmelCase ( lowerCamelCase_ :str ):
'''simple docstring'''
snake_case_ : Optional[Any] = {}
snake_case_ : Optional[Any] = os.path.join(lowerCamelCase_ , """all_results.json""" )
if os.path.exists(lowerCamelCase_ ):
with open(lowerCamelCase_ , """r""" ) as f:
snake_case_ : str = json.load(lowerCamelCase_ )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
def UpperCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = torch.cuda.is_available() and torch_device == """cuda"""
return is_using_cuda and is_apex_available()
__A : Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __UpperCamelCase ( lowercase__ ):
@classmethod
def a__ ( cls :Dict ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
snake_case_ : Optional[int] = tempfile.mkdtemp()
snake_case_ : Any = os.path.join(cls.tmpdir ,"""default_config.yml""" )
write_basic_config(save_location=cls.configPath )
snake_case_ : List[Any] = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def a__ ( cls :int ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :Optional[int] ):
snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case_ : List[str] = F'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
snake_case_ : Dict = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""glue_no_trainer""" ) ) )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :Tuple ):
snake_case_ : str = self.get_auto_remove_tmp_dir()
snake_case_ : Tuple = F'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
snake_case_ : Optional[int] = get_results(_UpperCamelCase )
self.assertLess(result["""perplexity"""] ,1_0_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :Tuple ):
snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case_ : List[str] = F'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : str = get_results(_UpperCamelCase )
self.assertLess(result["""perplexity"""] ,4_2 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""mlm_no_trainer""" ) ) )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :List[Any] ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
snake_case_ : Dict = 7 if get_gpu_count() > 1 else 2
snake_case_ : str = self.get_auto_remove_tmp_dir()
snake_case_ : str = F'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : Optional[int] = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 )
self.assertLess(result["""train_loss"""] ,0.5 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :List[str] ):
snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case_ : Optional[int] = F'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : str = get_results(_UpperCamelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["""eval_f1"""] ,2_8 )
self.assertGreaterEqual(result["""eval_exact"""] ,2_8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :List[Any] ):
snake_case_ : str = self.get_auto_remove_tmp_dir()
snake_case_ : Union[str, Any] = F'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : Union[str, Any] = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :int ):
snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case_ : List[Any] = F'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : int = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_rouge1"""] ,1_0 )
self.assertGreaterEqual(result["""eval_rouge2"""] ,2 )
self.assertGreaterEqual(result["""eval_rougeL"""] ,7 )
self.assertGreaterEqual(result["""eval_rougeLsum"""] ,7 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :int ):
snake_case_ : Tuple = self.get_auto_remove_tmp_dir()
snake_case_ : Optional[Any] = F'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : Any = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_bleu"""] ,3_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""translation_no_trainer""" ) ) )
@slow
def a__ ( self :Optional[Any] ):
snake_case_ : List[str] = logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCamelCase )
snake_case_ : Dict = self.get_auto_remove_tmp_dir()
snake_case_ : Tuple = F'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : str = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_overall_accuracy"""] ,0.10 )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :Any ):
snake_case_ : Dict = self.get_auto_remove_tmp_dir()
snake_case_ : Tuple = F'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
snake_case_ : str = get_results(_UpperCamelCase )
# The base model scores a 25%
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.6 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""step_1""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""image_classification_no_trainer""" ) ) ) | 8 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _lowercase :
def __init__( self: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict=2 , UpperCamelCase__: Optional[int]=True , UpperCamelCase__: Any=False , UpperCamelCase__: Dict=10 , UpperCamelCase__: List[str]=3 , UpperCamelCase__: int=32 * 4 , UpperCamelCase__: List[str]=32 * 6 , UpperCamelCase__: Optional[int]=4 , UpperCamelCase__: int=32 , ):
lowerCamelCase__ : List[str] = parent
lowerCamelCase__ : List[Any] = batch_size
lowerCamelCase__ : Union[str, Any] = is_training
lowerCamelCase__ : List[Any] = use_auxiliary_loss
lowerCamelCase__ : List[Any] = num_queries
lowerCamelCase__ : List[str] = num_channels
lowerCamelCase__ : Optional[Any] = min_size
lowerCamelCase__ : List[Any] = max_size
lowerCamelCase__ : int = num_labels
lowerCamelCase__ : Optional[int] = mask_feature_size
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCamelCase__ ) > 0.5
).float()
lowerCamelCase__ : Tuple = (torch.rand((self.batch_size, self.num_labels) , device=UpperCamelCase__ ) > 0.5).long()
lowerCamelCase__ : Tuple = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCamelCase_ ( self: Optional[int] ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs()
lowerCamelCase__ : Tuple = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Union[str, Any] ):
lowerCamelCase__ : Optional[Any] = output.encoder_hidden_states
lowerCamelCase__ : List[Any] = output.pixel_decoder_hidden_states
lowerCamelCase__ : str = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCamelCase__ ) , config.decoder_config.decoder_layers )
def lowerCamelCase_ ( self: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int]=False ):
with torch.no_grad():
lowerCamelCase__ : Optional[int] = MaskFormerModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : Dict = model(pixel_values=UpperCamelCase__ , pixel_mask=UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ , output_hidden_states=UpperCamelCase__ )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: int , UpperCamelCase__: Dict , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: List[str] ):
lowerCamelCase__ : List[Any] = MaskFormerForInstanceSegmentation(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
def comm_check_on_output(UpperCamelCase__: List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCamelCase__ : Optional[int] = model(pixel_values=UpperCamelCase__ , pixel_mask=UpperCamelCase__ )
lowerCamelCase__ : Any = model(UpperCamelCase__ )
comm_check_on_output(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = model(
pixel_values=UpperCamelCase__ , pixel_mask=UpperCamelCase__ , mask_labels=UpperCamelCase__ , class_labels=UpperCamelCase__ )
comm_check_on_output(UpperCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
a = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
a = False
a = False
a = False
a = False
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Any = MaskFormerModelTester(self )
lowerCamelCase__ : List[Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase__ , **UpperCamelCase__ , output_hidden_states=UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCamelCase__ )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def lowerCamelCase_ ( self: str ):
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def lowerCamelCase_ ( self: str ):
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def lowerCamelCase_ ( self: Union[str, Any] ):
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def lowerCamelCase_ ( self: Any ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowerCamelCase_ ( self: Optional[Any] ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase_ ( self: List[Any] ):
pass
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
lowerCamelCase__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : int = [*signature.parameters.keys()]
lowerCamelCase__ : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: List[str] ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowerCamelCase__ : Any = MaskFormerModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Union[str, Any] = (self.model_tester.min_size,) * 2
lowerCamelCase__ : Dict = {
"""pixel_values""": torch.randn((2, 3, *size) , device=UpperCamelCase__ ),
"""mask_labels""": torch.randn((2, 10, *size) , device=UpperCamelCase__ ),
"""class_labels""": torch.zeros(2 , 10 , device=UpperCamelCase__ ).long(),
}
lowerCamelCase__ : List[str] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCamelCase__ )
lowerCamelCase__ : List[str] = model(**UpperCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase__ , **UpperCamelCase__ , output_hidden_states=UpperCamelCase__ )
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : str = model_class(UpperCamelCase__ ).to(UpperCamelCase__ )
lowerCamelCase__ : List[str] = model(**UpperCamelCase__ , output_attentions=UpperCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowerCamelCase_ ( self: Union[str, Any] ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowerCamelCase__ : Optional[Any] = self.all_model_classes[1]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ , mask_labels=UpperCamelCase__ , class_labels=UpperCamelCase__ ).loss
loss.backward()
def lowerCamelCase_ ( self: str ):
# only MaskFormerForInstanceSegmentation has the loss
lowerCamelCase__ : List[str] = self.all_model_classes[1]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : Tuple = True
lowerCamelCase__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowerCamelCase__ : Tuple = model(UpperCamelCase__ , mask_labels=UpperCamelCase__ , class_labels=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCamelCase__ : Optional[int] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
lowerCamelCase__ : int = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCamelCase__ : str = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_A : Optional[int] =1e-4
def SCREAMING_SNAKE_CASE_ () -> Optional[int]:
lowerCamelCase__ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class _lowercase ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Dict ):
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Optional[Any] = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(UpperCamelCase__ )
lowerCamelCase__ : str = self.default_image_processor
lowerCamelCase__ : Dict = prepare_img()
lowerCamelCase__ : List[str] = image_processor(UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
lowerCamelCase__ : Any = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase__ , (1, 3, 800, 1_088) )
with torch.no_grad():
lowerCamelCase__ : List[str] = model(**UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(UpperCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
lowerCamelCase__ : Optional[Any] = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(UpperCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
lowerCamelCase__ : List[str] = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(UpperCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Union[str, Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(UpperCamelCase__ )
.eval()
)
lowerCamelCase__ : Optional[int] = self.default_image_processor
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : Tuple = image_processor(UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase__ , (1, 3, 800, 1_088) )
with torch.no_grad():
lowerCamelCase__ : Dict = model(**UpperCamelCase__ )
# masks_queries_logits
lowerCamelCase__ : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCamelCase__ : int = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
lowerCamelCase__ : int = torch.tensor(UpperCamelCase__ ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
# class_queries_logits
lowerCamelCase__ : Any = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCamelCase__ : Tuple = torch.tensor(
[
[1.6_512e00, -5.2_572e00, -3.3_519e00],
[3.6_169e-02, -5.9_025e00, -2.9_313e00],
[1.0_766e-04, -7.7_630e00, -5.1_263e00],
] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(UpperCamelCase__ )
.eval()
)
lowerCamelCase__ : str = self.default_image_processor
lowerCamelCase__ : List[str] = prepare_img()
lowerCamelCase__ : List[Any] = image_processor(UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
lowerCamelCase__ : List[str] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase__ , (1, 3, 800, 1_088) )
with torch.no_grad():
lowerCamelCase__ : Any = model(**UpperCamelCase__ )
# masks_queries_logits
lowerCamelCase__ : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCamelCase__ : Optional[Any] = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
lowerCamelCase__ : List[Any] = torch.tensor(UpperCamelCase__ ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
# class_queries_logits
lowerCamelCase__ : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCamelCase__ : Optional[int] = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(UpperCamelCase__ )
.eval()
)
lowerCamelCase__ : Any = self.default_image_processor
lowerCamelCase__ : Optional[Any] = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
lowerCamelCase__ : Optional[int] = inputs["""pixel_values"""].to(UpperCamelCase__ )
lowerCamelCase__ : int = [el.to(UpperCamelCase__ ) for el in inputs["""mask_labels"""]]
lowerCamelCase__ : int = [el.to(UpperCamelCase__ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
lowerCamelCase__ : List[Any] = model(**UpperCamelCase__ )
self.assertTrue(outputs.loss is not None )
| 41 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowercase ( _lowercase ):
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__ , """tf_padding""" ) )
self.parent.assertTrue(hasattr(UpperCamelCase__ , """depth_multiplier""" ) )
class _lowercase :
def __init__( self: str , UpperCamelCase__: Dict , UpperCamelCase__: Tuple=13 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Optional[Any]=0.25 , UpperCamelCase__: int=8 , UpperCamelCase__: Any=True , UpperCamelCase__: Dict=1_024 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Tuple="relu6" , UpperCamelCase__: int=0.1 , UpperCamelCase__: List[Any]=0.02 , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: Union[str, Any]=10 , UpperCamelCase__: str=None , ):
lowerCamelCase__ : Optional[Any] = parent
lowerCamelCase__ : List[str] = batch_size
lowerCamelCase__ : Optional[int] = num_channels
lowerCamelCase__ : Optional[int] = image_size
lowerCamelCase__ : Optional[Any] = depth_multiplier
lowerCamelCase__ : Union[str, Any] = min_depth
lowerCamelCase__ : Optional[Any] = tf_padding
lowerCamelCase__ : str = int(last_hidden_size * depth_multiplier )
lowerCamelCase__ : Any = output_stride
lowerCamelCase__ : int = hidden_act
lowerCamelCase__ : Tuple = classifier_dropout_prob
lowerCamelCase__ : Dict = use_labels
lowerCamelCase__ : Tuple = is_training
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : Optional[Any] = scope
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Dict = None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase__ : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self: str ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : List[str] = MobileNetVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : List[str] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] ):
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Optional[Any] = MobileNetVaForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = config_and_inputs
lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
a = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
a = False
a = False
a = False
a = False
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Optional[int] = MobileNetVaModelTester(self )
lowerCamelCase__ : List[str] = MobileNetVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def lowerCamelCase_ ( self: Optional[Any] ):
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def lowerCamelCase_ ( self: Any ):
pass
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
lowerCamelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : List[Any] = [*signature.parameters.keys()]
lowerCamelCase__ : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
def check_hidden_states_output(UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: List[Any] ):
lowerCamelCase__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : List[Any] = outputs.hidden_states
lowerCamelCase__ : Tuple = 26
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Optional[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: List[str] ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Dict = MobileNetVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> Union[str, Any]:
lowerCamelCase__ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Optional[int] ):
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : List[Any] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(UpperCamelCase__ )
lowerCamelCase__ : Dict = self.default_image_processor
lowerCamelCase__ : int = prepare_img()
lowerCamelCase__ : List[Any] = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : str = model(**UpperCamelCase__ )
# verify the logits
lowerCamelCase__ : List[str] = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase__ : List[str] = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 41 | 1 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
SCREAMING_SNAKE_CASE__:List[Any] = datasets.logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Union[str, Any] = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
SCREAMING_SNAKE_CASE__:str = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
SCREAMING_SNAKE_CASE__:Optional[int] = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def _lowerCamelCase( a , a , a=False , a=False , a=True , a=False , a="dummy_doc" ):
__a = {doc: key_lines}
__a = {doc: sys_lines}
__a = {}
__a = 0
__a = 0
__a = 0
__a = 0
__a = 0
__a = 0
__a , __a = reader.get_doc_mentions(a , key_doc_lines[doc] , a )
key_singletons_num += singletons_num
if NP_only or min_span:
__a = reader.set_annotated_parse_trees(a , key_doc_lines[doc] , a , a )
__a , __a = reader.get_doc_mentions(a , sys_doc_lines[doc] , a )
sys_singletons_num += singletons_num
if NP_only or min_span:
__a = reader.set_annotated_parse_trees(a , key_doc_lines[doc] , a , a )
if remove_nested:
__a , __a = reader.remove_nested_coref_mentions(a , a )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
__a , __a = reader.remove_nested_coref_mentions(a , a )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
__a = reader.get_mention_assignments(a , a )
__a = reader.get_mention_assignments(a , a )
__a = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"Number of removed nested coreferring mentions in the key "
F"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
"Number of resulting singleton clusters in the key "
F"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
F"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
"files, respectively" )
return doc_coref_infos
def _lowerCamelCase( a , a , a , a , a , a , a ):
__a = get_coref_infos(a , a , a , a , a , a )
__a = {}
__a = 0
__a = 0
for name, metric in metrics:
__a , __a , __a = evaluator.evaluate_documents(a , a , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"{name}/recall": recall, F"{name}/precision": precision, F"{name}/f1": fa} )
logger.info(
name.ljust(1_0 ) , F"Recall: {recall * 1_0_0:.2f}" , F" Precision: {precision * 1_0_0:.2f}" , F" F1: {fa * 1_0_0:.2f}" , )
if conll_subparts_num == 3:
__a = (conll / 3) * 1_0_0
logger.info(F"CoNLL score: {conll:.2f}" )
output_scores.update({"conll_score": conll} )
return output_scores
def _lowerCamelCase( a ):
__a = False
for line in key_lines:
if not line.startswith("#" ):
if len(line.split() ) > 6:
__a = line.split()[5]
if not parse_col == "-":
__a = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
def a__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False ):
__a = [
("mentions", evaluator.mentions),
("muc", evaluator.muc),
("bcub", evaluator.b_cubed),
("ceafe", evaluator.ceafe),
("lea", evaluator.lea),
]
if min_span:
__a = util.check_gold_parse_annotation(lowerCamelCase )
if not has_gold_parse:
raise NotImplementedError("References should have gold parse annotation to use 'min_span'." )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
__a = evaluate(
key_lines=lowerCamelCase , sys_lines=lowerCamelCase , metrics=lowerCamelCase , NP_only=lowerCamelCase , remove_nested=lowerCamelCase , keep_singletons=lowerCamelCase , min_span=lowerCamelCase , )
return score
| 364 | """simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
SCREAMING_SNAKE_CASE__:List[str] = 3
def _lowerCamelCase( a ):
print("Generating primitive root of p" )
while True:
__a = random.randrange(3 , a )
if pow(a , 2 , a ) == 1:
continue
if pow(a , a , a ) == 1:
continue
return g
def _lowerCamelCase( a ):
print("Generating prime p..." )
__a = rabin_miller.generate_large_prime(a ) # select large prime number.
__a = primitive_root(a ) # one primitive root on modulo p.
__a = random.randrange(3 , a ) # private_key -> have to be greater than 2 for safety.
__a = cryptomath.find_mod_inverse(pow(a , a , a ) , a )
__a = (key_size, e_a, e_a, p)
__a = (key_size, d)
return public_key, private_key
def _lowerCamelCase( a , a ):
if os.path.exists(F"{name}_pubkey.txt" ) or os.path.exists(F"{name}_privkey.txt" ):
print("\nWARNING:" )
print(
F"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
"Use a different name or delete these files and re-run this program." )
sys.exit()
__a , __a = generate_key(a )
print(F"\nWriting public key to file {name}_pubkey.txt..." )
with open(F"{name}_pubkey.txt" , "w" ) as fo:
fo.write(F"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}" )
print(F"Writing private key to file {name}_privkey.txt..." )
with open(F"{name}_privkey.txt" , "w" ) as fo:
fo.write(F"{private_key[0]},{private_key[1]}" )
def _lowerCamelCase( ):
print("Making key files..." )
make_key_files("elgamal" , 2_0_4_8 )
print("Key files generation successful" )
if __name__ == "__main__":
main()
| 268 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCAmelCase = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
for attribute in key.split(""".""" ):
lowerCAmelCase__ : Optional[Any] = getattr(UpperCamelCase , UpperCamelCase )
if weight_type is not None:
lowerCAmelCase__ : int = getattr(UpperCamelCase , UpperCamelCase ).shape
else:
lowerCAmelCase__ : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowerCAmelCase__ : Union[str, Any] = value
elif weight_type == "weight_g":
lowerCAmelCase__ : Any = value
elif weight_type == "weight_v":
lowerCAmelCase__ : Optional[int] = value
elif weight_type == "bias":
lowerCAmelCase__ : List[str] = value
elif weight_type == "running_mean":
lowerCAmelCase__ : List[str] = value
elif weight_type == "running_var":
lowerCAmelCase__ : int = value
elif weight_type == "num_batches_tracked":
lowerCAmelCase__ : Tuple = value
elif weight_type == "inv_freq":
lowerCAmelCase__ : str = value
else:
lowerCAmelCase__ : str = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : List[str] = fairseq_model.state_dict()
lowerCAmelCase__ : Any = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase__ : int = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , )
lowerCAmelCase__ : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
lowerCAmelCase__ : List[Any] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
lowerCAmelCase__ : List[Any] = True
if "*" in mapped_key:
lowerCAmelCase__ : str = name.split(UpperCamelCase )[0].split(""".""" )[-2]
lowerCAmelCase__ : List[str] = mapped_key.replace("""*""" , UpperCamelCase )
if "pos_bias_u" in name:
lowerCAmelCase__ : List[str] = None
elif "pos_bias_v" in name:
lowerCAmelCase__ : List[str] = None
elif "weight_g" in name:
lowerCAmelCase__ : Optional[int] = """weight_g"""
elif "weight_v" in name:
lowerCAmelCase__ : Union[str, Any] = """weight_v"""
elif "bias" in name:
lowerCAmelCase__ : Union[str, Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase__ : Union[str, Any] = """weight"""
elif "running_mean" in name:
lowerCAmelCase__ : List[Any] = """running_mean"""
elif "inv_freq" in name:
lowerCAmelCase__ : int = """inv_freq"""
elif "running_var" in name:
lowerCAmelCase__ : Any = """running_var"""
elif "num_batches_tracked" in name:
lowerCAmelCase__ : Union[str, Any] = """num_batches_tracked"""
else:
lowerCAmelCase__ : str = None
set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
continue
if not is_used:
unused_weights.append(UpperCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = full_name.split("""conv_layers.""" )[-1]
lowerCAmelCase__ : Any = name.split(""".""" )
lowerCAmelCase__ : Optional[int] = int(items[0] )
lowerCAmelCase__ : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowerCAmelCase__ : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowerCAmelCase__ : Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
lowerCAmelCase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
lowerCAmelCase__ : Union[str, Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True ):
"""simple docstring"""
if config_path is not None:
lowerCAmelCase__ : Tuple = WavaVecaConformerConfig.from_pretrained(UpperCamelCase , hidden_act="""swish""" )
else:
lowerCAmelCase__ : Union[str, Any] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
lowerCAmelCase__ : Tuple = """rotary"""
if is_finetuned:
if dict_path:
lowerCAmelCase__ : int = Dictionary.load(UpperCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCAmelCase__ : Tuple = target_dict.pad_index
lowerCAmelCase__ : Union[str, Any] = target_dict.bos_index
lowerCAmelCase__ : List[str] = target_dict.eos_index
lowerCAmelCase__ : Dict = len(target_dict.symbols )
lowerCAmelCase__ : Tuple = os.path.join(UpperCamelCase , """vocab.json""" )
if not os.path.isdir(UpperCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(UpperCamelCase ) )
return
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : int = 1
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Tuple = WavaVecaCTCTokenizer(
UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=UpperCamelCase , )
lowerCAmelCase__ : int = True if config.feat_extract_norm == """layer""" else False
lowerCAmelCase__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=UpperCamelCase , return_attention_mask=UpperCamelCase , )
lowerCAmelCase__ : List[Any] = WavaVecaProcessor(feature_extractor=UpperCamelCase , tokenizer=UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
lowerCAmelCase__ : List[str] = WavaVecaConformerForCTC(UpperCamelCase )
else:
lowerCAmelCase__ : Tuple = WavaVecaConformerForPreTraining(UpperCamelCase )
if is_finetuned:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
lowerCAmelCase__ : List[Any] = argparse.Namespace(task="""audio_pretraining""" )
lowerCAmelCase__ : List[str] = fairseq.tasks.setup_task(UpperCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(UpperCamelCase , UpperCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_lowerCAmelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 37 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = '''Hello, World!'''
_lowerCAmelCase = '''en_XX'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : Union[str, Any] = Path("data_bin" )
__UpperCamelCase : Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(snake_case__ ).parent ) , checkpoint_file=Path(snake_case__ ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(snake_case__ ) , bpe="sentencepiece" , sentencepiece_model=str(Path(snake_case__ ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(snake_case__ )
__UpperCamelCase : List[str] = xmod.model.encoder.sentence_encoder
__UpperCamelCase : Optional[int] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__UpperCamelCase : Any = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , snake_case__ )
__UpperCamelCase : Dict = XmodForSequenceClassification(snake_case__ ) if classification_head else XmodForMaskedLM(snake_case__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__UpperCamelCase : List[Any] = xmod_sent_encoder.embed_tokens.weight
__UpperCamelCase : List[Any] = xmod_sent_encoder.embed_positions.weight
__UpperCamelCase : str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__UpperCamelCase : Any = xmod_sent_encoder.layernorm_embedding.weight
__UpperCamelCase : str = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__UpperCamelCase : int = model.roberta.encoder.layer[i]
__UpperCamelCase : Any = xmod_sent_encoder.layers[i]
# self attention
__UpperCamelCase : List[str] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
__UpperCamelCase : Dict = xmod_layer.self_attn.q_proj.weight
__UpperCamelCase : Optional[Any] = xmod_layer.self_attn.q_proj.bias
__UpperCamelCase : Any = xmod_layer.self_attn.k_proj.weight
__UpperCamelCase : Tuple = xmod_layer.self_attn.k_proj.bias
__UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.v_proj.weight
__UpperCamelCase : Any = xmod_layer.self_attn.v_proj.bias
# self-attention output
__UpperCamelCase : Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
__UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.out_proj.weight
__UpperCamelCase : str = xmod_layer.self_attn.out_proj.bias
__UpperCamelCase : Dict = xmod_layer.self_attn_layer_norm.weight
__UpperCamelCase : Any = xmod_layer.self_attn_layer_norm.bias
# intermediate
__UpperCamelCase : Dict = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
__UpperCamelCase : List[Any] = xmod_layer.fca.weight
__UpperCamelCase : Optional[int] = xmod_layer.fca.bias
# output
__UpperCamelCase : List[Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
__UpperCamelCase : Tuple = xmod_layer.fca.weight
__UpperCamelCase : int = xmod_layer.fca.bias
__UpperCamelCase : Dict = xmod_layer.final_layer_norm.weight
__UpperCamelCase : int = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__UpperCamelCase : Any = xmod_layer.adapter_layer_norm.weight
__UpperCamelCase : int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__UpperCamelCase : Any = bert_output.adapter_modules[lang_code]
__UpperCamelCase : Dict = xmod_layer.adapter_modules[lang_code]
__UpperCamelCase : int = from_adapter.fca.weight
__UpperCamelCase : Dict = from_adapter.fca.bias
__UpperCamelCase : List[Any] = from_adapter.fca.weight
__UpperCamelCase : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__UpperCamelCase : Tuple = xmod_sent_encoder.layer_norm.weight
__UpperCamelCase : List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
__UpperCamelCase : Optional[Any] = xmod.model.classification_heads["mnli"].dense.weight
__UpperCamelCase : Any = xmod.model.classification_heads["mnli"].dense.bias
__UpperCamelCase : Tuple = xmod.model.classification_heads["mnli"].out_proj.weight
__UpperCamelCase : List[Any] = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
__UpperCamelCase : Any = xmod.model.encoder.lm_head.dense.weight
__UpperCamelCase : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
__UpperCamelCase : Tuple = xmod.model.encoder.lm_head.layer_norm.weight
__UpperCamelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
__UpperCamelCase : Tuple = xmod.model.encoder.lm_head.weight
__UpperCamelCase : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__UpperCamelCase : Any = xmod.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(snake_case__ )
__UpperCamelCase : Optional[Any] = model(snake_case__ )[0]
if classification_head:
__UpperCamelCase : int = xmod.model.classification_heads["mnli"](xmod.extract_features(snake_case__ ) )
else:
__UpperCamelCase : Optional[Any] = xmod.model(snake_case__ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__UpperCamelCase : Dict = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
__UpperCamelCase : Union[str, Any] = torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
_lowerCAmelCase = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 298 | 0 |
'''simple docstring'''
UpperCamelCase_ : str = [
'''DownloadConfig''',
'''DownloadManager''',
'''DownloadMode''',
'''StreamingDownloadManager''',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 142 |
'''simple docstring'''
from manim import *
class _a ( __lowerCAmelCase ):
def _lowercase ( self ) -> Optional[int]:
_snake_case = Rectangle(height=0.5 ,width=0.5 )
_snake_case = Rectangle(height=0.4_6 ,width=0.4_6 ).set_stroke(width=0 )
_snake_case = [mem.copy() for i in range(6 )]
_snake_case = [mem.copy() for i in range(6 )]
_snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
_snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
_snake_case = VGroup(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
_snake_case = Text("CPU" ,font_size=24 )
_snake_case = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
_snake_case = [mem.copy() for i in range(4 )]
_snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
_snake_case = Text("GPU" ,font_size=24 )
_snake_case = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE )
gpu.move_to([-1, -1, 0] )
self.add(_SCREAMING_SNAKE_CASE )
_snake_case = [mem.copy() for i in range(6 )]
_snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
_snake_case = Text("Model" ,font_size=24 )
_snake_case = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.add(_SCREAMING_SNAKE_CASE )
_snake_case = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
rect.set_stroke(_SCREAMING_SNAKE_CASE )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_snake_case = Rectangle(height=0.4_6 / 4 ,width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE ,opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.0_2 ,direction=_SCREAMING_SNAKE_CASE )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] ,direction=_SCREAMING_SNAKE_CASE ,buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] ,direction=_SCREAMING_SNAKE_CASE ,buff=0.0 )
self.add(_SCREAMING_SNAKE_CASE )
cpu_targs.append(_SCREAMING_SNAKE_CASE )
_snake_case = [mem.copy() for i in range(6 )]
_snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
_snake_case = Text("Loaded Checkpoint" ,font_size=24 )
_snake_case = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,aligned_edge=_SCREAMING_SNAKE_CASE ,buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_snake_case = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_snake_case = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(_SCREAMING_SNAKE_CASE ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
_snake_case = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE ) ,Write(_SCREAMING_SNAKE_CASE ) )
self.play(Write(_SCREAMING_SNAKE_CASE ,run_time=1 ) ,Create(_SCREAMING_SNAKE_CASE ,run_time=1 ) )
_snake_case = []
_snake_case = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
_snake_case = fill.copy().set_fill(_SCREAMING_SNAKE_CASE ,opacity=0.7 )
target.move_to(_SCREAMING_SNAKE_CASE )
first_animations.append(GrowFromCenter(_SCREAMING_SNAKE_CASE ,run_time=1 ) )
_snake_case = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(_SCREAMING_SNAKE_CASE ,run_time=1.5 ) )
self.play(*_SCREAMING_SNAKE_CASE )
self.play(*_SCREAMING_SNAKE_CASE )
self.wait()
| 142 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class _snake_case ( __lowerCamelCase ):
'''simple docstring'''
A__ : Optional[int] = """van"""
def __init__( self: Tuple ,lowerCamelCase_: Any=224 ,lowerCamelCase_: Dict=3 ,lowerCamelCase_: Union[str, Any]=[7, 3, 3, 3] ,lowerCamelCase_: Union[str, Any]=[4, 2, 2, 2] ,lowerCamelCase_: Tuple=[64, 128, 320, 512] ,lowerCamelCase_: int=[3, 3, 12, 3] ,lowerCamelCase_: List[str]=[8, 8, 4, 4] ,lowerCamelCase_: str="gelu" ,lowerCamelCase_: Any=0.0_2 ,lowerCamelCase_: str=1e-6 ,lowerCamelCase_: int=1e-2 ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: Any=0.0 ,**lowerCamelCase_: Optional[int] ,) -> List[Any]:
super().__init__(**__lowercase )
UpperCAmelCase_ : Optional[Any] = image_size
UpperCAmelCase_ : Union[str, Any] = num_channels
UpperCAmelCase_ : Dict = patch_sizes
UpperCAmelCase_ : Any = strides
UpperCAmelCase_ : Tuple = hidden_sizes
UpperCAmelCase_ : Optional[int] = depths
UpperCAmelCase_ : List[Any] = mlp_ratios
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Any = layer_norm_eps
UpperCAmelCase_ : Union[str, Any] = layer_scale_init_value
UpperCAmelCase_ : int = drop_path_rate
UpperCAmelCase_ : Optional[int] = dropout_rate
| 345 |
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
"""simple docstring"""
return int((input_a, input_a).count(1 ) != 0 )
def _lowerCAmelCase ( ) -> None:
"""simple docstring"""
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 230 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : List[Any] = '''megatron-bert'''
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int=2_9_0_5_6 , SCREAMING_SNAKE_CASE__ : Dict=1_0_2_4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2_4 , SCREAMING_SNAKE_CASE__ : List[str]=1_6 , SCREAMING_SNAKE_CASE__ : List[str]=4_0_9_6 , SCREAMING_SNAKE_CASE__ : str="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE__ : Dict=1E-12 , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Dict="absolute" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , **SCREAMING_SNAKE_CASE__ : Optional[int] , ) -> str:
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
a_ : Union[str, Any] = vocab_size
a_ : Union[str, Any] = hidden_size
a_ : Optional[Any] = num_hidden_layers
a_ : Dict = num_attention_heads
a_ : int = hidden_act
a_ : List[str] = intermediate_size
a_ : Tuple = hidden_dropout_prob
a_ : Optional[int] = attention_probs_dropout_prob
a_ : Union[str, Any] = max_position_embeddings
a_ : List[Any] = type_vocab_size
a_ : Optional[int] = initializer_range
a_ : Optional[Any] = layer_norm_eps
a_ : str = position_embedding_type
a_ : Any = use_cache
| 369 |
from __future__ import annotations
UpperCAmelCase_ : Dict = [True] * 100_0001
UpperCAmelCase_ : Any = 2
while i * i <= 100_0000:
if seive[i]:
for j in range(i * i, 100_0001, i):
UpperCAmelCase_ : Tuple = False
i += 1
def SCREAMING_SNAKE_CASE_ ( __A : int ) -> bool:
"""simple docstring"""
return seive[n]
def SCREAMING_SNAKE_CASE_ ( __A : int ) -> bool:
"""simple docstring"""
return any(digit in '02468' for digit in str(__A ) )
def SCREAMING_SNAKE_CASE_ ( __A : int = 1_00_00_00 ) -> list[int]:
"""simple docstring"""
a_ : Dict = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(__A ) and not contains_an_even_digit(__A ):
a_ : Dict = str(__A )
a_ : Any = [int(str_num[j:] + str_num[:j] ) for j in range(len(__A ) )]
if all(is_prime(__A ) for i in list_nums ):
result.append(__A )
return result
def SCREAMING_SNAKE_CASE_ ( ) -> int:
"""simple docstring"""
return len(find_circular_primes() )
if __name__ == "__main__":
print(F'{len(find_circular_primes()) = }')
| 120 | 0 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str=[] ) -> Dict:
__lowerCamelCase : Tuple = size[0] - overlap_pixels * 2
__lowerCamelCase : str = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
__lowerCamelCase : Any = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55
__lowerCamelCase : Dict = np.pad(snake_case__ , mode='linear_ramp' , pad_width=snake_case__ , end_values=0 )
if "l" in remove_borders:
__lowerCamelCase : List[Any] = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
__lowerCamelCase : Optional[int] = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
__lowerCamelCase : List[Any] = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
__lowerCamelCase : List[str] = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] ) -> Tuple:
return max(snake_case__ , min(snake_case__ , snake_case__ ) )
def UpperCAmelCase__ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : int ) -> int:
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def UpperCAmelCase__ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] ) -> int:
__lowerCamelCase : List[str] = list(snake_case__ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
__lowerCamelCase : Tuple = clamp_rect(snake_case__ , [0, 0] , [image_size[0], image_size[1]] )
return rect
def UpperCAmelCase__ ( UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any ) -> Any:
__lowerCamelCase : Optional[Any] = Image.new('RGB' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(snake_case__ , (original_slice, 0) )
return result
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ) -> Dict:
__lowerCamelCase : str = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
__lowerCamelCase : Tuple = tile.crop(snake_case__ )
return tile
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple ) -> Any:
__lowerCamelCase : Union[str, Any] = n % d
return n - divisor
class UpperCAmelCase_ (_lowercase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 3_50 , ) -> Dict:
super().__init__(
vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , max_noise_level=__lowerCamelCase , )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
torch.manual_seed(0 )
__lowerCamelCase : Optional[int] = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__lowerCamelCase : Optional[Any] = add_overlap_rect(__lowerCamelCase , __lowerCamelCase , image.size )
__lowerCamelCase : str = image.crop(__lowerCamelCase )
__lowerCamelCase : str = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__lowerCamelCase : Tuple = translated_slice_x - (original_image_slice / 2)
__lowerCamelCase : List[str] = max(0 , __lowerCamelCase )
__lowerCamelCase : int = squeeze_tile(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
__lowerCamelCase : Optional[Any] = to_input.size
__lowerCamelCase : Any = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__lowerCamelCase : List[Any] = super(__lowerCamelCase , self ).__call__(image=__lowerCamelCase , **__lowerCamelCase ).images[0]
__lowerCamelCase : Dict = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__lowerCamelCase : Tuple = unsqueeze_tile(__lowerCamelCase , __lowerCamelCase )
__lowerCamelCase : str = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__lowerCamelCase : List[Any] = []
if x == 0:
remove_borders.append('l' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('r' )
if y == 0:
remove_borders.append('t' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('b' )
__lowerCamelCase : List[str] = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__lowerCamelCase ) , mode='L' , )
final_image.paste(
__lowerCamelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __lowerCamelCase )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 75 , SCREAMING_SNAKE_CASE_ = 9.0 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1_28 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 32 , ) -> int:
__lowerCamelCase : Optional[Any] = Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) )
__lowerCamelCase : List[Any] = math.ceil(image.size[0] / tile_size )
__lowerCamelCase : str = math.ceil(image.size[1] / tile_size )
__lowerCamelCase : Union[str, Any] = tcx * tcy
__lowerCamelCase : int = 0
for y in range(__lowerCamelCase ):
for x in range(__lowerCamelCase ):
self._process_tile(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , prompt=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , noise_level=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , )
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def UpperCAmelCase__ ( ) -> Optional[Any]:
# Run a demo
__lowerCamelCase : List[Any] = "stabilityai/stable-diffusion-x4-upscaler"
__lowerCamelCase : Any = StableDiffusionTiledUpscalePipeline.from_pretrained(snake_case__ , revision='fp16' , torch_dtype=torch.floataa )
__lowerCamelCase : str = pipe.to('cuda' )
__lowerCamelCase : Tuple = Image.open('../../docs/source/imgs/diffusers_library.jpg' )
def callback(UpperCAmelCase_ : Optional[Any] ):
print(F'progress: {obj["progress"]:.4f}' )
obj["image"].save('diffusers_library_progress.jpg' )
__lowerCamelCase : List[Any] = pipe(image=snake_case__ , prompt='Black font, white background, vector' , noise_level=40 , callback=snake_case__ )
final_image.save('diffusers_library.jpg' )
if __name__ == "__main__":
main()
| 185 | import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
_snake_case = logging.getLogger(__name__)
_snake_case = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
_snake_case = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _snake_case :
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(_lowercase )} , )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCamelCase__: bool = field(
default=_lowercase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowerCamelCase__: str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCamelCase__: bool = field(
default=_lowercase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def _lowerCamelCase ( self: str ) -> Tuple:
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class _snake_case :
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCamelCase__: Optional[str] = field(default=_lowercase , metadata={"help": "The input training data file (a text file)."} )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "An optional input train ref data file for whole word masking in Chinese."} , )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."} , )
lowerCamelCase__: bool = field(
default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
lowerCamelCase__: Optional[int] = field(
default=5 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
lowerCamelCase__: Optional[int] = field(
default=_lowercase , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated. Default to the max input length of the model."
)
} , )
lowerCamelCase__: Optional[int] = field(
default=_lowercase , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowerCamelCase__: float = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
lowerCamelCase__: bool = field(
default=_lowercase , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
def _lowerCamelCase ( self: Any ) -> Tuple:
if self.train_file is not None:
__UpperCAmelCase : Optional[int] = self.train_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
__UpperCAmelCase : str = self.validation_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Optional[int]:
with open(snake_case__, "r", encoding="utf-8" ) as f:
__UpperCAmelCase : List[str] = [json.loads(snake_case__ ) for line in f.read().splitlines() if (len(snake_case__ ) > 0 and not line.isspace())]
assert len(snake_case__ ) == len(snake_case__ )
__UpperCAmelCase : Optional[int] = {c: dataset[c] for c in dataset.column_names}
__UpperCAmelCase : Any = refs
return Dataset.from_dict(snake_case__ )
def _UpperCamelCase ( ) -> str:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__UpperCAmelCase : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCAmelCase : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", snake_case__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__UpperCAmelCase : Optional[Any] = load_dataset(data_args.dataset_name, data_args.dataset_config_name )
if "validation" not in datasets.keys():
__UpperCAmelCase : Dict = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=f'''train[:{data_args.validation_split_percentage}%]''', )
__UpperCAmelCase : List[str] = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=f'''train[{data_args.validation_split_percentage}%:]''', )
else:
__UpperCAmelCase : List[Any] = {}
if data_args.train_file is not None:
__UpperCAmelCase : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
__UpperCAmelCase : List[str] = data_args.validation_file
__UpperCAmelCase : Tuple = data_args.train_file.split("." )[-1]
if extension == "txt":
__UpperCAmelCase : str = "text"
__UpperCAmelCase : List[Any] = load_dataset(snake_case__, data_files=snake_case__ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase : Tuple = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
__UpperCAmelCase : Any = AutoConfig.from_pretrained(model_args.config_name, **snake_case__ )
elif model_args.model_name_or_path:
__UpperCAmelCase : int = AutoConfig.from_pretrained(model_args.model_name_or_path, **snake_case__ )
else:
__UpperCAmelCase : str = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
__UpperCAmelCase : List[Any] = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **snake_case__ )
elif model_args.model_name_or_path:
__UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **snake_case__ )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name." )
if model_args.model_name_or_path:
__UpperCAmelCase : int = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=snake_case__, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
else:
logger.info("Training new model from scratch" )
__UpperCAmelCase : Any = AutoModelForMaskedLM.from_config(snake_case__ )
model.resize_token_embeddings(len(snake_case__ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
__UpperCAmelCase : List[str] = datasets["train"].column_names
else:
__UpperCAmelCase : Union[str, Any] = datasets["validation"].column_names
__UpperCAmelCase : Union[str, Any] = "text" if "text" in column_names else column_names[0]
__UpperCAmelCase : Any = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(snake_case__ ):
# Remove empty lines
__UpperCAmelCase : Any = [line for line in examples["text"] if len(snake_case__ ) > 0 and not line.isspace()]
return tokenizer(examples["text"], padding=snake_case__, truncation=snake_case__, max_length=data_args.max_seq_length )
__UpperCAmelCase : List[str] = datasets.map(
snake_case__, batched=snake_case__, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
__UpperCAmelCase : str = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file )
if data_args.validation_ref_file is not None:
__UpperCAmelCase : List[str] = add_chinese_references(
tokenized_datasets["validation"], data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
__UpperCAmelCase : List[str] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
__UpperCAmelCase : Tuple = False
# Data collator
# This one will take care of randomly masking the tokens.
__UpperCAmelCase : Optional[Any] = DataCollatorForWholeWordMask(tokenizer=snake_case__, mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__UpperCAmelCase : str = Trainer(
model=snake_case__, args=snake_case__, train_dataset=tokenized_datasets["train"] if training_args.do_train else None, eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, tokenizer=snake_case__, data_collator=snake_case__, )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__UpperCAmelCase : int = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
__UpperCAmelCase : Any = model_args.model_name_or_path
else:
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : str = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model() # Saves the tokenizer too for easy upload
__UpperCAmelCase : str = os.path.join(training_args.output_dir, "train_results.txt" )
if trainer.is_world_process_zero():
with open(snake_case__, "w" ) as writer:
logger.info("***** Train results *****" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json" ) )
# Evaluation
__UpperCAmelCase : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__UpperCAmelCase : List[Any] = trainer.evaluate()
__UpperCAmelCase : int = math.exp(eval_output["eval_loss"] )
__UpperCAmelCase : Union[str, Any] = perplexity
__UpperCAmelCase : List[Any] = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt" )
if trainer.is_world_process_zero():
with open(snake_case__, "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in sorted(results.items() ):
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
return results
def _UpperCamelCase ( snake_case__ ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 157 | 0 |
def __lowercase ( lowerCamelCase : List[str] ):
UpperCamelCase_ : Dict = len(__A )
while cur > 1:
# Find the maximum number in arr
UpperCamelCase_ : str = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
UpperCamelCase_ : int = arr[mi::-1] + arr[mi + 1 : len(__A )]
# Reverse whole list
UpperCamelCase_ : Optional[int] = arr[cur - 1 :: -1] + arr[cur : len(__A )]
cur -= 1
return arr
if __name__ == "__main__":
a_ = input('Enter numbers separated by a comma:\n').strip()
a_ = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 357 | from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _lowercase ( snake_case_ , snake_case_ , unittest.TestCase ):
lowercase = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Dict=False ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class in get_values(snake_case ):
UpperCamelCase_ : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class _lowercase ( snake_case_ ):
def __init__( self : Tuple , snake_case : Optional[int] , snake_case : Optional[Any]=1_3 , snake_case : Optional[Any]=7 , snake_case : Any=True , snake_case : Optional[int]=True , snake_case : Union[str, Any]=True , snake_case : Optional[Any]=True , snake_case : List[Any]=9_9 , snake_case : int=3_2 , snake_case : str=3_2 , snake_case : str=2 , snake_case : List[Any]=4 , snake_case : Tuple=3_7 , snake_case : Any="gelu" , snake_case : str=0.1 , snake_case : Tuple=0.1 , snake_case : Optional[Any]=5_1_2 , snake_case : Optional[int]=1_6 , snake_case : List[Any]=2 , snake_case : Dict=0.02 , snake_case : List[str]=3 , snake_case : Any=4 , snake_case : Any=None , ) -> int:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = parent
UpperCamelCase_ : Any = batch_size
UpperCamelCase_ : List[str] = seq_length
UpperCamelCase_ : List[Any] = is_training
UpperCamelCase_ : Optional[Any] = use_input_mask
UpperCamelCase_ : Tuple = use_token_type_ids
UpperCamelCase_ : Optional[int] = use_labels
UpperCamelCase_ : Dict = vocab_size
UpperCamelCase_ : Dict = hidden_size
UpperCamelCase_ : List[str] = num_hidden_layers
UpperCamelCase_ : Tuple = num_attention_heads
UpperCamelCase_ : Optional[int] = intermediate_size
UpperCamelCase_ : int = hidden_act
UpperCamelCase_ : List[str] = hidden_dropout_prob
UpperCamelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCamelCase_ : Tuple = max_position_embeddings
UpperCamelCase_ : Tuple = type_vocab_size
UpperCamelCase_ : Optional[Any] = type_sequence_label_size
UpperCamelCase_ : Any = initializer_range
UpperCamelCase_ : Tuple = num_labels
UpperCamelCase_ : Tuple = num_choices
UpperCamelCase_ : Tuple = scope
UpperCamelCase_ : Dict = embedding_size
def SCREAMING_SNAKE_CASE__ ( self : str ) -> str:
"""simple docstring"""
UpperCamelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ : Optional[Any] = None
if self.use_input_mask:
UpperCamelCase_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ : Union[str, Any] = None
if self.use_token_type_ids:
UpperCamelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase_ : Optional[int] = None
UpperCamelCase_ : Tuple = None
UpperCamelCase_ : Dict = None
if self.use_labels:
UpperCamelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase_ : Union[str, Any] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case : str , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : Tuple , snake_case : str , snake_case : Optional[Any] , snake_case : List[str] ) -> int:
"""simple docstring"""
UpperCamelCase_ : str = TFMobileBertModel(config=snake_case )
UpperCamelCase_ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : Union[str, Any] = model(snake_case )
UpperCamelCase_ : Optional[Any] = [input_ids, input_mask]
UpperCamelCase_ : List[Any] = model(snake_case )
UpperCamelCase_ : Union[str, Any] = model(snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Dict , snake_case : Any , snake_case : Dict , snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : List[str] = TFMobileBertForMaskedLM(config=snake_case )
UpperCamelCase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : int = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : Any , snake_case : int , snake_case : int , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = TFMobileBertForNextSentencePrediction(config=snake_case )
UpperCamelCase_ : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : List[Any] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : Optional[Any] , snake_case : List[Any] , snake_case : int , snake_case : str , snake_case : str , snake_case : Any , snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : List[str] = TFMobileBertForPreTraining(config=snake_case )
UpperCamelCase_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : Any = model(snake_case )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : Dict , snake_case : List[str] , snake_case : str , snake_case : List[str] , snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = self.num_labels
UpperCamelCase_ : Dict = TFMobileBertForSequenceClassification(config=snake_case )
UpperCamelCase_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : List[Any] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : Tuple , snake_case : Any , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Any ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.num_choices
UpperCamelCase_ : Dict = TFMobileBertForMultipleChoice(config=snake_case )
UpperCamelCase_ : int = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase_ : int = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase_ : List[str] = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase_ : Optional[Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCamelCase_ : int = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : Optional[int] , snake_case : Tuple , snake_case : str , snake_case : str , snake_case : Optional[int] , snake_case : str , snake_case : List[str] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Any = self.num_labels
UpperCamelCase_ : Optional[Any] = TFMobileBertForTokenClassification(config=snake_case )
UpperCamelCase_ : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : Tuple = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case : Tuple , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Dict , snake_case : List[str] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = TFMobileBertForQuestionAnswering(config=snake_case )
UpperCamelCase_ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : Tuple = model(snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.prepare_config_and_inputs()
(
(
UpperCamelCase_
), (
UpperCamelCase_
), (
UpperCamelCase_
), (
UpperCamelCase_
), (
UpperCamelCase_
), (
UpperCamelCase_
), (
UpperCamelCase_
),
) : Union[str, Any] = config_and_inputs
UpperCamelCase_ : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = TFMobileBertModelTest.TFMobileBertModelTester(self )
UpperCamelCase_ : str = ConfigTester(self , config_class=snake_case , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : int ) -> str:
"""simple docstring"""
UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
"""simple docstring"""
UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple:
"""simple docstring"""
for model_name in ["google/mobilebert-uncased"]:
UpperCamelCase_ : Optional[Any] = TFMobileBertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_tf
class _lowercase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Any = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
UpperCamelCase_ : List[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase_ : List[str] = model(snake_case )[0]
UpperCamelCase_ : Any = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , snake_case )
UpperCamelCase_ : Dict = tf.constant(
[
[
[-4.5919547, -9.248295, -9.645256],
[-6.7306175, -6.440284, -6.6052837],
[-7.2743506, -6.7847915, -6.024673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case , atol=1e-4 )
| 50 | 0 |
'''simple docstring'''
import string
from math import logaa
def _UpperCamelCase ( __A , __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = document.translate(
str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" )
UpperCamelCase__ = document_without_punctuation.split(" " ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def _UpperCamelCase ( __A , __A ) -> tuple[int, int]:
'''simple docstring'''
UpperCamelCase__ = corpus.lower().translate(
str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with ''
UpperCamelCase__ = corpus_without_punctuation.split("\n" )
UpperCamelCase__ = term.lower()
return (len([doc for doc in docs if term in doc] ), len(__A ))
def _UpperCamelCase ( __A , __A , __A=False ) -> float:
'''simple docstring'''
if smoothing:
if n == 0:
raise ValueError("log10(0) is undefined." )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError("df must be > 0" )
elif n == 0:
raise ValueError("log10(0) is undefined." )
return round(logaa(n / df ) , 3 )
def _UpperCamelCase ( __A , __A ) -> float:
'''simple docstring'''
return round(tf * idf , 3 )
| 80 |
'''simple docstring'''
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _UpperCamelCase ( __A = 100 ) -> int:
'''simple docstring'''
UpperCamelCase__ = 1
UpperCamelCase__ = 2
for i in range(2 , max_n + 1 ):
UpperCamelCase__ = pre_numerator
UpperCamelCase__ = 2 * i // 3 if i % 3 == 0 else 1
UpperCamelCase__ = cur_numerator
UpperCamelCase__ = e_cont * pre_numerator + temp
return sum_digits(__A )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 80 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
a_ = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def _a( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : tuple, UpperCamelCase__ : Path, UpperCamelCase__ : Tuple, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Tuple, UpperCamelCase__ : int, UpperCamelCase__ : Optional[int]=False, ):
'''simple docstring'''
output_path.parent.mkdir(parents=UpperCamelCase__, exist_ok=UpperCamelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
UpperCamelCase__, UpperCamelCase__, f=output_path.as_posix(), input_names=UpperCamelCase__, output_names=UpperCamelCase__, dynamic_axes=UpperCamelCase__, do_constant_folding=UpperCamelCase__, use_external_data_format=UpperCamelCase__, enable_onnx_checker=UpperCamelCase__, opset_version=UpperCamelCase__, )
else:
export(
UpperCamelCase__, UpperCamelCase__, f=output_path.as_posix(), input_names=UpperCamelCase__, output_names=UpperCamelCase__, dynamic_axes=UpperCamelCase__, do_constant_folding=UpperCamelCase__, opset_version=UpperCamelCase__, )
@torch.no_grad()
def _a( UpperCamelCase__ : str, UpperCamelCase__ : str, UpperCamelCase__ : int, UpperCamelCase__ : bool = False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
else:
SCREAMING_SNAKE_CASE__ : Dict ='''cpu'''
SCREAMING_SNAKE_CASE__ : int =Path(UpperCamelCase__ )
# VAE DECODER
SCREAMING_SNAKE_CASE__ : Optional[Any] =AutoencoderKL.from_pretrained(model_path + '''/vae''' )
SCREAMING_SNAKE_CASE__ : Dict =vae_decoder.config.latent_channels
# forward only through the decoder part
SCREAMING_SNAKE_CASE__ : Dict =vae_decoder.decode
onnx_export(
UpperCamelCase__, model_args=(
torch.randn(1, UpperCamelCase__, 2_5, 2_5 ).to(device=UpperCamelCase__, dtype=UpperCamelCase__ ),
False,
), output_path=output_path / '''vae_decoder''' / '''model.onnx''', ordered_input_names=['''latent_sample''', '''return_dict'''], output_names=['''sample'''], dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
}, opset=UpperCamelCase__, )
del vae_decoder
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=1_4,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
a_ = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('SD: Done: ONNX') | 222 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 222 | 1 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A__ : Optional[int] =get_logger(__name__)
class UpperCAmelCase :
_lowercase: Dict = '''dummy_data'''
_lowercase: Any = '''datasets'''
_lowercase: Dict = False
def __init__( self : Optional[Any] , __snake_case : str , __snake_case : str , __snake_case : Union[Version, str] , __snake_case : Optional[str] = None , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[List[Callable]] = None , ) -> str:
_lowerCAmelCase = 0
_lowerCAmelCase = dataset_name
_lowerCAmelCase = cache_dir
_lowerCAmelCase = use_local_dummy_data
_lowerCAmelCase = config
# download_callbacks take a single url as input
_lowerCAmelCase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_lowerCAmelCase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_lowerCAmelCase = str(__snake_case )
# to be downloaded
_lowerCAmelCase = None
_lowerCAmelCase = None
@property
def lowercase__ ( self : List[str] ) -> int:
if self._dummy_file is None:
_lowerCAmelCase = self.download_dummy_data()
return self._dummy_file
@property
def lowercase__ ( self : int ) -> str:
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def lowercase__ ( self : List[Any] ) -> Optional[int]:
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def lowercase__ ( self : int ) -> Union[str, Any]:
_lowerCAmelCase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_lowerCAmelCase = cached_path(
__snake_case , cache_dir=self.cache_dir , extract_compressed_file=__snake_case , force_extract=__snake_case )
return os.path.join(__snake_case , self.dummy_file_name )
@property
def lowercase__ ( self : Dict ) -> Optional[int]:
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def lowercase__ ( self : str ) -> Dict:
if self._bucket_url is None:
_lowerCAmelCase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def lowercase__ ( self : Tuple ) -> List[Any]:
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def lowercase__ ( self : Optional[int] , __snake_case : Union[str, Any] , *__snake_case : List[str] ) -> Union[str, Any]:
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_lowerCAmelCase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_lowerCAmelCase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__snake_case , __snake_case ):
return self.create_dummy_data_dict(__snake_case , __snake_case )
elif isinstance(__snake_case , (list, tuple) ):
return self.create_dummy_data_list(__snake_case , __snake_case )
else:
return self.create_dummy_data_single(__snake_case , __snake_case )
def lowercase__ ( self : Any , __snake_case : str , *__snake_case : Optional[int] ) -> Optional[int]:
return self.download_and_extract(__snake_case )
def lowercase__ ( self : Any , __snake_case : str , __snake_case : str ) -> Union[str, Any]:
return self.download_and_extract(__snake_case )
def lowercase__ ( self : List[str] , __snake_case : int , *__snake_case : int , **__snake_case : str ) -> Union[str, Any]:
return path
def lowercase__ ( self : str ) -> Optional[int]:
return {}
def lowercase__ ( self : Dict , __snake_case : Dict , __snake_case : Any ) -> List[Any]:
_lowerCAmelCase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__snake_case , __snake_case ):
for single_url in single_urls:
download_callback(__snake_case )
else:
_lowerCAmelCase = single_urls
download_callback(__snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__snake_case , __snake_case ):
_lowerCAmelCase = [os.path.join(__snake_case , urllib.parse.quote_plus(Path(__snake_case ).name ) ) for x in single_urls]
else:
_lowerCAmelCase = single_urls
_lowerCAmelCase = os.path.join(__snake_case , urllib.parse.quote_plus(Path(__snake_case ).name ) )
_lowerCAmelCase = value
# make sure that values are unique
if all(isinstance(__snake_case , __snake_case ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_lowerCAmelCase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def lowercase__ ( self : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[str] ) -> Optional[int]:
_lowerCAmelCase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_lowerCAmelCase = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __snake_case ) ) for url in data_url )
_lowerCAmelCase = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_lowerCAmelCase = [data_url[0]] * len(__snake_case )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCAmelCase = os.path.join(__snake_case , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__snake_case )
return dummy_data_list
def lowercase__ ( self : Dict , __snake_case : List[Any] , __snake_case : Dict ) -> int:
for download_callback in self.download_callbacks:
download_callback(__snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCAmelCase = os.path.join(__snake_case , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__snake_case ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def lowercase__ ( self : int ) -> Optional[Any]:
pass
def lowercase__ ( self : Dict ) -> str:
pass
def lowercase__ ( self : Tuple , __snake_case : Optional[Any] ) -> str:
def _iter_archive_members(__snake_case : int ):
# this preserves the order of the members inside the ZIP archive
_lowerCAmelCase = Path(self.dummy_file ).parent
_lowerCAmelCase = path.relative_to(__snake_case )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_lowerCAmelCase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__snake_case )
_lowerCAmelCase = Path(__snake_case )
_lowerCAmelCase = _iter_archive_members(__snake_case ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__snake_case ).as_posix(), file_path.open("""rb""" )
def lowercase__ ( self : Optional[Any] , __snake_case : List[Any] ) -> Any:
if not isinstance(__snake_case , __snake_case ):
_lowerCAmelCase = [paths]
for path in paths:
if os.path.isfile(__snake_case ):
if os.path.basename(__snake_case ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__snake_case ):
if os.path.basename(__snake_case ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__snake_case ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__snake_case , __snake_case )
| 70 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : Union[str, Any] = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 81 | 0 |
'''simple docstring'''
from __future__ import annotations
import requests
def __a ( _UpperCamelCase: str ) -> dict:
"""simple docstring"""
_snake_case = F"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
return requests.get(_UpperCamelCase ).json()
def __a ( _UpperCamelCase: int = 10 ) -> list[dict]:
"""simple docstring"""
_snake_case = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
_snake_case = requests.get(_UpperCamelCase ).json()[:max_stories]
return [get_hackernews_story(_UpperCamelCase ) for story_id in story_ids]
def __a ( _UpperCamelCase: int = 10 ) -> str:
"""simple docstring"""
_snake_case = hackernews_top_stories(_UpperCamelCase )
return "\n".join("* [{title}]({url})".format(**_UpperCamelCase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 354 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
UpperCamelCase_ : int = '''pytorch_model.bin'''
UpperCamelCase_ : str = '''pytorch_model.bin.index.json'''
UpperCamelCase_ : int = '''adapter_config.json'''
UpperCamelCase_ : str = '''adapter_model.bin'''
UpperCamelCase_ : str = '''adapter_model.safetensors'''
UpperCamelCase_ : List[Any] = '''tf_model.h5'''
UpperCamelCase_ : Union[str, Any] = '''tf_model.h5.index.json'''
UpperCamelCase_ : Tuple = '''model.ckpt'''
UpperCamelCase_ : Union[str, Any] = '''flax_model.msgpack'''
UpperCamelCase_ : Union[str, Any] = '''flax_model.msgpack.index.json'''
UpperCamelCase_ : Dict = '''model.safetensors'''
UpperCamelCase_ : List[Any] = '''model.safetensors.index.json'''
UpperCamelCase_ : Tuple = '''config.json'''
UpperCamelCase_ : List[str] = '''preprocessor_config.json'''
UpperCamelCase_ : List[Any] = FEATURE_EXTRACTOR_NAME
UpperCamelCase_ : Union[str, Any] = '''generation_config.json'''
UpperCamelCase_ : str = '''modelcard.json'''
UpperCamelCase_ : List[Any] = '''▁'''
UpperCamelCase_ : Tuple = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
UpperCamelCase_ : Any = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
UpperCamelCase_ : Tuple = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
UpperCamelCase_ : str = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def __a ( _UpperCamelCase: Optional[Any] ) -> int:
"""simple docstring"""
if version.parse(_UpperCamelCase ) < version.parse(_UpperCamelCase ):
if "dev" in min_version:
_snake_case = (
"This example requires a source install from HuggingFace Transformers (see "
"`https://huggingface.co/docs/transformers/installation#install-from-source`),"
)
else:
_snake_case = F"""This example requires a minimum version of {min_version},"""
error_message += F""" but the version found is {__version__}.\n"""
raise ImportError(
error_message
+ "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
"versions of HuggingFace Transformers." )
| 142 | 0 |
from timeit import timeit
snake_case : List[str] = {
"MALAYALAM": True,
"String": False,
"rotor": True,
"level": True,
"A": True,
"BB": True,
"ABC": False,
"amanaplanacanalpanama": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def lowerCAmelCase_ ( _snake_case : str ) -> bool:
'''simple docstring'''
__magic_name__ : Union[str, Any] = 0
__magic_name__ : Union[str, Any] = len(_snake_case ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def lowerCAmelCase_ ( _snake_case : str ) -> bool:
'''simple docstring'''
__magic_name__ : Dict = len(_snake_case ) // 2
__magic_name__ : Tuple = len(_snake_case )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(_snake_case ) )
def lowerCAmelCase_ ( _snake_case : str ) -> bool:
'''simple docstring'''
if len(_snake_case ) <= 2:
return True
if s[0] == s[len(_snake_case ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def lowerCAmelCase_ ( _snake_case : str ) -> bool:
'''simple docstring'''
return s == s[::-1]
def lowerCAmelCase_ ( _snake_case : str ) -> None:
'''simple docstring'''
__magic_name__ : Dict = F'''all({name}(key) is value for key, value in test_data.items())'''
__magic_name__ : Dict = F'''from __main__ import test_data, {name}'''
__magic_name__ : Optional[int] = 500000
__magic_name__ : Dict = timeit(stmt=_snake_case , setup=_snake_case , number=_snake_case )
print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"{key:21} {value}")
print("a man a plan a canal panama")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("is_palindrome_slice")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("is_palindrome")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("is_palindrome_recursive")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("is_palindrome_traversal")
| 281 |
def lowerCAmelCase_ ( _snake_case : str , _snake_case : str ) -> bool:
'''simple docstring'''
__magic_name__ : Union[str, Any] = len(_snake_case ) + 1
__magic_name__ : List[str] = len(_snake_case ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
__magic_name__ : str = [[0 for i in range(_snake_case )] for j in range(_snake_case )]
# since string of zero length match pattern of zero length
__magic_name__ : Optional[int] = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , _snake_case ):
__magic_name__ : Optional[int] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , _snake_case ):
__magic_name__ : Union[str, Any] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , _snake_case ):
for j in range(1 , _snake_case ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
__magic_name__ : Optional[int] = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
__magic_name__ : Optional[Any] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
__magic_name__ : List[Any] = dp[i - 1][j]
else:
__magic_name__ : Union[str, Any] = 0
else:
__magic_name__ : Dict = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
snake_case : Optional[Any] = "aab"
snake_case : List[str] = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"{input_string} matches the given pattern {pattern}")
else:
print(F"{input_string} does not match with the given pattern {pattern}")
| 281 | 1 |
"""simple docstring"""
def _A ( lowercase = 10_00 ):
"""simple docstring"""
a =3
a =0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(F'''{solution() = }''') | 368 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase_ : List[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["pixel_values"]
def __init__( self , __A = True , __A = None , __A = PILImageResampling.BICUBIC , __A = True , __A = None , __A = True , __A = 1 / 255 , __A = True , __A = None , __A = None , __A = True , **__A , ) -> None:
super().__init__(**__A )
a =size if size is not None else {'''shortest_edge''': 224}
a =get_size_dict(__A , default_to_square=__A )
a =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
a =get_size_dict(__A , default_to_square=__A , param_name='''crop_size''' )
a =do_resize
a =size
a =resample
a =do_center_crop
a =crop_size
a =do_rescale
a =rescale_factor
a =do_normalize
a =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
a =image_std if image_std is not None else OPENAI_CLIP_STD
a =do_convert_rgb
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = PILImageResampling.BICUBIC , __A = None , **__A , ) -> np.ndarray:
a =get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
a =get_resize_output_image_size(__A , size=size['''shortest_edge'''] , default_to_square=__A )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = None , **__A , ) -> np.ndarray:
a =get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(__A , size=(size['''height'''], size['''width''']) , data_format=__A , **__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = None , **__A , ) -> Any:
return rescale(__A , scale=__A , data_format=__A , **__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A = None , **__A , ) -> np.ndarray:
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> PIL.Image.Image:
a =do_resize if do_resize is not None else self.do_resize
a =size if size is not None else self.size
a =get_size_dict(__A , param_name='''size''' , default_to_square=__A )
a =resample if resample is not None else self.resample
a =do_center_crop if do_center_crop is not None else self.do_center_crop
a =crop_size if crop_size is not None else self.crop_size
a =get_size_dict(__A , param_name='''crop_size''' , default_to_square=__A )
a =do_rescale if do_rescale is not None else self.do_rescale
a =rescale_factor if rescale_factor is not None else self.rescale_factor
a =do_normalize if do_normalize is not None else self.do_normalize
a =image_mean if image_mean is not None else self.image_mean
a =image_std if image_std is not None else self.image_std
a =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
a =make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
a =[convert_to_rgb(__A ) for image in images]
# All transformations expect numpy arrays.
a =[to_numpy_array(__A ) for image in images]
if do_resize:
a =[self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_center_crop:
a =[self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
a =[self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
a =[self.normalize(image=__A , mean=__A , std=__A ) for image in images]
a =[to_channel_dimension_format(__A , __A ) for image in images]
a ={'''pixel_values''': images}
return BatchFeature(data=__A , tensor_type=__A ) | 215 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
__A : List[Any] = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
__A : int = {
"allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
_UpperCAmelCase = bs[:]
_UpperCAmelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_SCREAMING_SNAKE_CASE )
cs.append(2**8 + n )
n += 1
_UpperCAmelCase = [chr(_SCREAMING_SNAKE_CASE ) for n in cs]
return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def lowercase ( _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = set()
_UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCAmelCase = char
return pairs
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ["""input_ids""", """attention_mask"""]
def __init__( self : Any , __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any]="replace" , __UpperCamelCase : List[str]="<s>" , __UpperCamelCase : Union[str, Any]="</s>" , __UpperCamelCase : Optional[Any]="</s>" , __UpperCamelCase : List[Any]="<s>" , __UpperCamelCase : Any="<unk>" , __UpperCamelCase : List[Any]="<pad>" , __UpperCamelCase : Tuple="<mask>" , __UpperCamelCase : str=False , **__UpperCamelCase : Any , )->int:
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else bos_token
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else eos_token
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else sep_token
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else cls_token
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else unk_token
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , **__UpperCamelCase , )
with open(__UpperCamelCase , encoding='''utf-8''' ) as vocab_handle:
_UpperCAmelCase = json.load(__UpperCamelCase )
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
_UpperCAmelCase = errors # how to handle errors in decoding
_UpperCAmelCase = bytes_to_unicode()
_UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCamelCase , encoding='''utf-8''' ) as merges_handle:
_UpperCAmelCase = merges_handle.read().split('''\n''' )[1:-1]
_UpperCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
_UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
_UpperCAmelCase = {}
_UpperCAmelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_UpperCAmelCase = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowercase__ ( self : str )->Dict:
return len(self.encoder )
def lowercase__ ( self : List[str] )->Union[str, Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : Dict , __UpperCamelCase : int )->Optional[Any]:
if token in self.cache:
return self.cache[token]
_UpperCAmelCase = tuple(__UpperCamelCase )
_UpperCAmelCase = get_pairs(__UpperCamelCase )
if not pairs:
return token
while True:
_UpperCAmelCase = min(__UpperCamelCase , key=lambda __UpperCamelCase : self.bpe_ranks.get(__UpperCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCAmelCase , _UpperCAmelCase = bigram
_UpperCAmelCase = []
_UpperCAmelCase = 0
while i < len(__UpperCamelCase ):
try:
_UpperCAmelCase = word.index(__UpperCamelCase , __UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_UpperCAmelCase = j
if word[i] == first and i < len(__UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCAmelCase = tuple(__UpperCamelCase )
_UpperCAmelCase = new_word
if len(__UpperCamelCase ) == 1:
break
else:
_UpperCAmelCase = get_pairs(__UpperCamelCase )
_UpperCAmelCase = ''' '''.join(__UpperCamelCase )
_UpperCAmelCase = word
return word
def lowercase__ ( self : Optional[int] , __UpperCamelCase : Tuple )->int:
_UpperCAmelCase = []
for token in re.findall(self.pat , __UpperCamelCase ):
_UpperCAmelCase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCamelCase ).split(''' ''' ) )
return bpe_tokens
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Tuple )->Optional[Any]:
return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : int )->List[str]:
return self.decoder.get(__UpperCamelCase )
def lowercase__ ( self : Any , __UpperCamelCase : Dict )->List[Any]:
_UpperCAmelCase = ''''''.join(__UpperCamelCase )
_UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def lowercase__ ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None )->Tuple[str]:
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCamelCase , ensure_ascii=__UpperCamelCase ) + '''\n''' )
_UpperCAmelCase = 0
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCamelCase : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
_UpperCAmelCase = token_index
writer.write(''' '''.join(__UpperCamelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def lowercase__ ( self : int , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None )->List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
_UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : List[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False )->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1]
def lowercase__ ( self : Tuple , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None )->List[int]:
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[Any]=False , **__UpperCamelCase : List[str] )->Dict:
_UpperCAmelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__UpperCamelCase ) > 0 and not text[0].isspace()):
_UpperCAmelCase = ''' ''' + text
return (text, kwargs)
def lowercase__ ( self : List[str] , __UpperCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[bool] = None , )->dict:
_UpperCAmelCase = super()._pad(
encoded_inputs=__UpperCamelCase , max_length=__UpperCamelCase , padding_strategy=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , )
# Load from model defaults
if return_attention_mask is None:
_UpperCAmelCase = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_UpperCAmelCase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_UpperCAmelCase = len(encoded_inputs['''global_attention_mask'''] ) != len(__UpperCamelCase )
if needs_to_be_padded:
_UpperCAmelCase = len(__UpperCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_UpperCAmelCase = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
_UpperCAmelCase = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 260 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Dict = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = """camembert"""
def __init__( self : List[str] , __UpperCamelCase : Union[str, Any]=3_0_5_2_2 , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : Optional[int]=1_2 , __UpperCamelCase : Union[str, Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : int=0.1 , __UpperCamelCase : int=5_1_2 , __UpperCamelCase : Dict=2 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : int=1e-12 , __UpperCamelCase : Optional[Any]=1 , __UpperCamelCase : Dict=0 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : Any="absolute" , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : str=None , **__UpperCamelCase : Optional[Any] , )->str:
super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
_UpperCAmelCase = classifier_dropout
class _a ( lowerCAmelCase):
"""simple docstring"""
@property
def lowercase__ ( self : int )->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 260 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 360 |
from maths.prime_factors import prime_factors
def _A ( lowerCAmelCase_ : int ):
"""simple docstring"""
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
lowerCAmelCase__ = F'Input value of [number={number}] must be an integer'
raise TypeError(lowerCAmelCase_ )
if number < 1:
raise ValueError("Input must be a positive integer" )
return -1 if len(prime_factors(lowerCAmelCase_ ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 221 | 0 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any]=1_3 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : List[str]=3_3 , UpperCAmelCase__ : Dict=3_2 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Optional[int]=3_7 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : str=5_1_2 , UpperCAmelCase__ : Dict=1_6 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : str=None , ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = scope
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict ) -> List[Any]:
__SCREAMING_SNAKE_CASE = EsmModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ) -> List[str]:
__SCREAMING_SNAKE_CASE = EsmForMaskedLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = EsmForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
__SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : int = False
snake_case__ : List[str] = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : str = ()
snake_case__ : List[str] = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : List[str] = True
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = EsmModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = EsmModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()[0]
__SCREAMING_SNAKE_CASE = EsmEmbeddings(config=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = torch.as_tensor([[1_2, 3_1, 1_3, model.padding_idx]] )
__SCREAMING_SNAKE_CASE = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
__SCREAMING_SNAKE_CASE = create_position_ids_from_input_ids(UpperCAmelCase__ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase__ , UpperCAmelCase__ ) ) )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()[0]
__SCREAMING_SNAKE_CASE = EsmEmbeddings(config=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = torch.empty(2 , 4 , 3_0 )
__SCREAMING_SNAKE_CASE = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
__SCREAMING_SNAKE_CASE = torch.as_tensor([expected_single_positions, expected_single_positions] )
__SCREAMING_SNAKE_CASE = embeddings.create_position_ids_from_inputs_embeds(UpperCAmelCase__ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase__ , UpperCAmelCase__ ) ) )
@unittest.skip("Esm does not support embedding resizing" )
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip("Esm does not support embedding resizing" )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
pass
@require_torch
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self : Dict ) -> Dict:
with torch.no_grad():
__SCREAMING_SNAKE_CASE = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE = 3_3
__SCREAMING_SNAKE_CASE = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
@slow
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
with torch.no_grad():
__SCREAMING_SNAKE_CASE = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )[0]
# compare the actual values for a slice.
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
| 54 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowerCAmelCase__ = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
lowerCAmelCase__ = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
lowerCAmelCase__ = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCamelCase ( datasets.Metric ):
def snake_case_ (self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
] , )
def snake_case_ (self , __a , __a , __a=None , __a=True , __a=False ) -> Union[str, Any]:
if rouge_types is None:
UpperCamelCase = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
UpperCamelCase = rouge_scorer.RougeScorer(rouge_types=__a , use_stemmer=__a )
if use_aggregator:
UpperCamelCase = scoring.BootstrapAggregator()
else:
UpperCamelCase = []
for ref, pred in zip(__a , __a ):
UpperCamelCase = scorer.score(__a , __a )
if use_aggregator:
aggregator.add_scores(__a )
else:
scores.append(__a )
if use_aggregator:
UpperCamelCase = aggregator.aggregate()
else:
UpperCamelCase = {}
for key in scores[0]:
UpperCamelCase = [score[key] for score in scores]
return result
| 153 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class A ( _UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase = 42
lowerCamelCase = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class A ( _UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase = 42
lowerCamelCase = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 363 |
import random
def _snake_case( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : str ) -> tuple:
'''simple docstring'''
A__ , A__ , A__ = [], [], []
for element in data:
if element < pivot:
less.append(SCREAMING_SNAKE_CASE__ )
elif element > pivot:
greater.append(SCREAMING_SNAKE_CASE__ )
else:
equal.append(SCREAMING_SNAKE_CASE__ )
return less, equal, greater
def _snake_case( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int ) -> str:
'''simple docstring'''
if index >= len(SCREAMING_SNAKE_CASE__ ) or index < 0:
return None
A__ = items[random.randint(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 )]
A__ = 0
A__ , A__ , A__ = _partition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = len(SCREAMING_SNAKE_CASE__ )
A__ = len(SCREAMING_SNAKE_CASE__ )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# must be in larger
else:
return quick_select(SCREAMING_SNAKE_CASE__ , index - (m + count) )
| 282 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_A : List[Any] = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str = ['YolosFeatureExtractor']
_A : int = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : List[Any] = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
_A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 142 |
from typing import Any
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , A : Any ) ->Optional[int]:
lowerCamelCase__ : Optional[int] = data
lowerCamelCase__ : Any = None
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] ) ->str:
lowerCamelCase__ : Any = None
def __lowerCamelCase ( self : Tuple ) ->Any:
lowerCamelCase__ : str = self.head
while temp is not None:
print(temp.data , end=''' ''' )
lowerCamelCase__ : Dict = temp.next
print()
def __lowerCamelCase ( self : Dict , A : Any ) ->Optional[int]:
lowerCamelCase__ : Union[str, Any] = Node(A )
lowerCamelCase__ : Dict = self.head
lowerCamelCase__ : List[str] = new_node
def __lowerCamelCase ( self : Optional[int] , A : int , A : Tuple ) ->List[Any]:
if node_data_a == node_data_a:
return
else:
lowerCamelCase__ : Tuple = self.head
while node_a is not None and node_a.data != node_data_a:
lowerCamelCase__ : Union[str, Any] = node_a.next
lowerCamelCase__ : int = self.head
while node_a is not None and node_a.data != node_data_a:
lowerCamelCase__ : Optional[int] = node_a.next
if node_a is None or node_a is None:
return
lowerCamelCase__ , lowerCamelCase__ : str = node_a.data, node_a.data
if __name__ == "__main__":
_A : List[Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
| 142 | 1 |
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase) -> None:
a = len(__UpperCamelCase)
print("The following activities are selected:")
# The first activity is always selected
a = 0
print(__UpperCamelCase , end=",")
# Consider rest of the activities
for j in range(__UpperCamelCase):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(__UpperCamelCase , end=",")
a = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ : Optional[Any] = [1, 3, 0, 5, 8, 5]
lowercase__ : int = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 180 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : int = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class a__ ( UpperCamelCase__ ):
a : Optional[Any] = """sew-d"""
def __init__( self , A=32 , A=768 , A=12 , A=12 , A=3072 , A=2 , A=512 , A=256 , A=True , A=True , A=("p2c", "c2p") , A="layer_norm" , A="gelu_python" , A=0.1 , A=0.1 , A=0.1 , A=0.0 , A=0.1 , A=0.0_2 , A=1e-7 , A=1e-5 , A="group" , A="gelu" , A=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , A=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A=False , A=128 , A=16 , A=True , A=0.0_5 , A=10 , A=2 , A=0.0 , A=10 , A=0 , A="mean" , A=False , A=False , A=256 , A=0 , A=1 , A=2 , **A , ) -> Dict:
'''simple docstring'''
super().__init__(**A , pad_token_id=A , bos_token_id=A , eos_token_id=A )
a = hidden_size
a = feat_extract_norm
a = feat_extract_activation
a = list(A )
a = list(A )
a = list(A )
a = conv_bias
a = num_conv_pos_embeddings
a = num_conv_pos_embedding_groups
a = len(self.conv_dim )
a = num_hidden_layers
a = intermediate_size
a = squeeze_factor
a = max_position_embeddings
a = position_buckets
a = share_att_key
a = relative_attention
a = norm_rel_ebd
a = list(A )
a = hidden_act
a = num_attention_heads
a = hidden_dropout
a = attention_dropout
a = activation_dropout
a = feat_proj_dropout
a = final_dropout
a = layer_norm_eps
a = feature_layer_norm_eps
a = initializer_range
a = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a = apply_spec_augment
a = mask_time_prob
a = mask_time_length
a = mask_time_min_masks
a = mask_feature_prob
a = mask_feature_length
a = mask_feature_min_masks
# ctc loss
a = ctc_loss_reduction
a = ctc_zero_infinity
# sequence classification
a = use_weighted_layer_sum
a = classifier_proj_size
@property
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 180 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __a ( unittest.TestCase ):
@slow
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCamelCase__ : List[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase__ : Optional[Any] = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase__ : Any = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase__ : Union[str, Any] = model(__lowercase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __lowercase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __lowercase , atol=1e-3 ) )
@slow
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : Any = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
UpperCamelCase__ : Optional[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase__ : Union[str, Any] = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase__ : Dict = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase__ : Any = model(__lowercase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __lowercase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __lowercase , atol=1e-3 ) ) | 189 |
import functools
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
if not isinstance(_A , _A ) or not all(isinstance(_A , _A ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(_A ) != 3 or not all(isinstance(_A , _A ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(_A ) == 0:
return 0
if min(_A ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(_A ) >= 366:
raise ValueError("All days elements should be less than 366" )
snake_case_ = set(_A )
@functools.cache
def dynamic_programming(_A ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 187 | 0 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
UpperCAmelCase_ : Optional[int] = open # noqa: we just need to have a builtin inside this module to test it properly
| 198 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
UpperCAmelCase_ : Dict = {
"169M": 12,
"430M": 24,
"1B5": 24,
"3B": 32,
"7B": 32,
"14B": 40,
}
UpperCAmelCase_ : Any = {
"169M": 768,
"430M": 1_024,
"1B5": 2_048,
"3B": 2_560,
"7B": 4_096,
"14B": 5_120,
}
def UpperCamelCase ( _A : Dict )-> Optional[int]:
"""simple docstring"""
A__ = list(state_dict.keys() )
for name in state_dict_keys:
A__ = state_dict.pop(_A )
# emb -> embedding
if name.startswith("emb." ):
A__ = name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
A__ = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
A__ = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , _A )
# ffn -> feed_forward
A__ = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , _A )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
A__ = name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
A__ = name.replace(".time_mix_v" , ".time_mix_value" )
# time_mix_r -> time_mix_key and reshape
if name.endswith(".time_mix_r" ):
A__ = name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
A__ = "rwkv." + name
A__ = weight
return state_dict
def UpperCamelCase ( _A : str , _A : List[Any] , _A : List[Any] , _A : int=None , _A : List[str]=None , _A : Dict=False , _A : List[Any]=None )-> str:
"""simple docstring"""
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
A__ = 50277
A__ = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
A__ = PreTrainedTokenizerFast(tokenizer_file=_A )
A__ = len(_A )
tokenizer.save_pretrained(_A )
# 2. Build the config
A__ = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
A__ = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
A__ = RwkvConfig(
vocab_size=_A , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(_A )
# 3. Download model file then convert state_dict
A__ = hf_hub_download(_A , _A )
A__ = torch.load(_A , map_location="cpu" )
A__ = convert_state_dict(_A )
# 4. Split in shards and save
A__ , A__ = shard_checkpoint(_A )
for shard_file, shard in shards.items():
torch.save(_A , os.path.join(_A , _A ) )
if index is not None:
A__ = os.path.join(_A , _A )
# Save the index as well
with open(_A , "w" , encoding="utf-8" ) as f:
A__ = json.dumps(_A , indent=2 , sort_keys=_A ) + "\n"
f.write(_A )
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
"Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." )
A__ = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
A__ = torch.load(os.path.join(_A , _A ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_A , _A ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
A__ = AutoModelForCausalLM.from_pretrained(_A )
model.push_to_hub(_A , max_shard_size="2GB" )
tokenizer.push_to_hub(_A )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 198 | 1 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]:
if isinstance(SCREAMING_SNAKE_CASE , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class A__ :
def a__ ( self : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
pass
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
def a__ ( self : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=None , **_UpperCAmelCase : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(_UpperCAmelCase )
__lowercase = model(input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def a__ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple=None , **_UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_UpperCAmelCase , text_model=_UpperCAmelCase )
__lowercase = model(input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Any=None , **_UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = {'vision_model': vision_model, 'text_model': text_model}
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_UpperCAmelCase )
__lowercase = model(input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def a__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_UpperCAmelCase , text_model=_UpperCAmelCase )
__lowercase = model(input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
__lowercase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_UpperCAmelCase )
__lowercase = model(input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
__lowercase = after_output[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_UpperCAmelCase , 1e-5 )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_UpperCAmelCase , text_model=_UpperCAmelCase )
__lowercase = model(
input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase , output_attentions=_UpperCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_UpperCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_UpperCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def a__ ( self : Dict , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ) -> Optional[Any]:
"""simple docstring"""
__lowercase = np.abs((a - b) ).max()
self.assertLessEqual(_UpperCAmelCase , _UpperCAmelCase , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_UpperCAmelCase )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_UpperCAmelCase )
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_save_load(**_UpperCAmelCase )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_UpperCAmelCase )
@slow
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_pretrained_model_and_inputs()
__lowercase = model_a(**_UpperCAmelCase )
__lowercase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_UpperCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_UpperCAmelCase )
__lowercase = model_a(**_UpperCAmelCase )
__lowercase = after_outputs[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_UpperCAmelCase , 1e-5 )
@require_tf
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def a__ ( self : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = TFViTModel(_UpperCAmelCase , name='vision_model' )
__lowercase = TFBertModel(_UpperCAmelCase , name='text_model' )
return vision_model, text_model
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = TFViTModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def a__ ( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str]=None , **_UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_UpperCAmelCase , text_model=_UpperCAmelCase )
__lowercase = model(
input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase , output_attentions=_UpperCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_UpperCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_UpperCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def a__ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] ) -> int:
"""simple docstring"""
__lowercase = TFDeiTModel(_UpperCAmelCase , name='vision_model' )
__lowercase = TFRobertaModel(_UpperCAmelCase , name='text_model' )
return vision_model, text_model
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = TFDeiTModelTester(self )
__lowercase = TFRobertaModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def a__ ( self : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
__lowercase = TFCLIPVisionModel(_UpperCAmelCase , name='vision_model' )
__lowercase = TFBertModel(_UpperCAmelCase , name='text_model' )
return vision_model, text_model
def a__ ( self : int ) -> Any:
"""simple docstring"""
__lowercase = TFCLIPVisionModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = clip_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=_UpperCAmelCase )
__lowercase = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
__lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__lowercase = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='np' )
__lowercase = model(**_UpperCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _UpperCAmelCase , atol=1e-3 ) )
| 325 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = "layoutlmv3"
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=5_02_65 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Optional[int]=30_72 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Optional[int]=1e-5 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Dict=10_24 , _UpperCAmelCase : int=1_28 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : List[Any]=1_28 , _UpperCAmelCase : List[Any]=64 , _UpperCAmelCase : List[Any]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[int]=2_24 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : List[str] , ) -> Dict:
"""simple docstring"""
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = max_ad_position_embeddings
__lowercase = coordinate_size
__lowercase = shape_size
__lowercase = has_relative_attention_bias
__lowercase = rel_pos_bins
__lowercase = max_rel_pos
__lowercase = has_spatial_attention_bias
__lowercase = rel_ad_pos_bins
__lowercase = max_rel_ad_pos
__lowercase = text_embed
__lowercase = visual_embed
__lowercase = input_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = classifier_dropout
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : int = version.parse("1.12" )
@property
def a__ ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def a__ ( self : int ) -> float:
"""simple docstring"""
return 1e-5
@property
def a__ ( self : str ) -> int:
"""simple docstring"""
return 12
def a__ ( self : str , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
| 325 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _a ( unittest.TestCase):
def UpperCAmelCase__( self : Optional[Any] )-> Dict:
lowerCAmelCase__ : Optional[int] = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
], # cummulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
lowerCAmelCase__ : Tuple = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
lowerCAmelCase__ : List[Any] = tf.convert_to_tensor(
[8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above
lowerCAmelCase__ : Tuple = tf_top_k_top_p_filtering(_SCREAMING_SNAKE_CASE , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
lowerCAmelCase__ : int = output[output != -float('''inf''' )]
lowerCAmelCase__ : List[Any] = tf.cast(
tf.where(tf.not_equal(_SCREAMING_SNAKE_CASE , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1E-12 )
tf.debugging.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@require_tf
class _a ( unittest.TestCase , _lowercase):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
_a : List[Any] = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def UpperCAmelCase__( self : List[str] )-> Any:
# TF-only test: tf.saved_model export
lowerCAmelCase__ : Dict = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ : Optional[Any] = 2
lowerCAmelCase__ : Optional[Any] = 2
class _a ( tf.Module):
def __init__( self : int , _SCREAMING_SNAKE_CASE : Optional[Any] )-> Union[str, Any]:
super(_SCREAMING_SNAKE_CASE , self ).__init__()
lowerCAmelCase__ : Dict = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_SCREAMING_SNAKE_CASE , )
def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict )-> Optional[int]:
lowerCAmelCase__ : Dict = self.model.generate(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , max_new_tokens=_SCREAMING_SNAKE_CASE , return_dict_in_generate=_SCREAMING_SNAKE_CASE , )
return {"sequences": outputs["sequences"]}
lowerCAmelCase__ : List[str] = [[2, 0], [102, 103]]
lowerCAmelCase__ : int = [[1, 0], [1, 1]]
lowerCAmelCase__ : Dict = DummyModel(model=_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , signatures={'''serving_default''': dummy_model.serving} )
lowerCAmelCase__ : str = tf.saved_model.load(_SCREAMING_SNAKE_CASE ).signatures['''serving_default''']
for batch_size in range(1 , len(_SCREAMING_SNAKE_CASE ) + 1 ):
lowerCAmelCase__ : List[Any] = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
lowerCAmelCase__ : Any = serving_func(**_SCREAMING_SNAKE_CASE )['''sequences''']
lowerCAmelCase__ : Any = test_model.generate(**_SCREAMING_SNAKE_CASE , max_new_tokens=_SCREAMING_SNAKE_CASE )
tf.debugging.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__( self : Any )-> Union[str, Any]:
# TF-only test: tf.saved_model export
lowerCAmelCase__ : List[Any] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ : Tuple = 1
lowerCAmelCase__ : int = 2
class _a ( tf.Module):
def __init__( self : List[Any] , _SCREAMING_SNAKE_CASE : Tuple )-> Union[str, Any]:
super(_SCREAMING_SNAKE_CASE , self ).__init__()
lowerCAmelCase__ : List[Any] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_SCREAMING_SNAKE_CASE , )
def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] )-> str:
lowerCAmelCase__ : List[str] = self.model.generate(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , max_new_tokens=_SCREAMING_SNAKE_CASE , return_dict_in_generate=_SCREAMING_SNAKE_CASE , )
return {"sequences": outputs["sequences"]}
lowerCAmelCase__ : int = [[2], [102, 103]]
lowerCAmelCase__ : List[Any] = [[1], [1, 1]]
lowerCAmelCase__ : Optional[int] = DummyModel(model=_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , signatures={'''serving_default''': dummy_model.serving} )
lowerCAmelCase__ : Dict = tf.saved_model.load(_SCREAMING_SNAKE_CASE ).signatures['''serving_default''']
for input_row in range(len(_SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase__ : Dict = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
lowerCAmelCase__ : Any = serving_func(**_SCREAMING_SNAKE_CASE )['''sequences''']
lowerCAmelCase__ : Dict = test_model.generate(**_SCREAMING_SNAKE_CASE , max_new_tokens=_SCREAMING_SNAKE_CASE )
tf.debugging.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
@require_tensorflow_text
def UpperCAmelCase__( self : Union[str, Any] )-> int:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=_SCREAMING_SNAKE_CASE )
class _a ( tf.keras.layers.Layer):
def __init__( self : str )-> int:
super().__init__()
lowerCAmelCase__ : List[Any] = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_SCREAMING_SNAKE_CASE , '''spiece.model''' ) , '''rb''' ).read() )
lowerCAmelCase__ : int = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : Tuple , *_SCREAMING_SNAKE_CASE : Any , **_SCREAMING_SNAKE_CASE : Optional[Any] )-> Tuple:
lowerCAmelCase__ : int = self.tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = text.pad_model_inputs(
_SCREAMING_SNAKE_CASE , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
lowerCAmelCase__ : Tuple = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
return self.tokenizer.detokenize(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = CompleteSentenceTransformer()
lowerCAmelCase__ : List[Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
lowerCAmelCase__ : Optional[Any] = complete_model(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[int] = tf.keras.Model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
keras_model.save(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Union[str, Any] )-> List[str]:
# Has PT equivalent: this test relies on random sampling
lowerCAmelCase__ : Tuple = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 10,
'''temperature''': 0.7,
}
lowerCAmelCase__ : Tuple = 14
lowerCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ : Optional[Any] = '''Hello, my dog is cute and'''
lowerCAmelCase__ : Optional[int] = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
lowerCAmelCase__ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ : Optional[int] = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
lowerCAmelCase__ : Tuple = model.generate(**_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.assertTrue(expectation == len(generated_tokens[0] ) )
lowerCAmelCase__ : Optional[int] = [638, 198]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
lowerCAmelCase__ : Optional[int] = model.generate(**_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def UpperCAmelCase__( self : Optional[int] )-> Optional[int]:
# Has PT equivalent: ample use of framework-specific code
lowerCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowerCAmelCase__ : Union[str, Any] = '''Hugging Face is a technology company based in New York and Paris.'''
lowerCAmelCase__ : List[Any] = bart_tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='''tf''' ).input_ids
lowerCAmelCase__ : List[str] = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowerCAmelCase__ : Optional[Any] = bart_model.generate(_SCREAMING_SNAKE_CASE ).numpy()
class _a ( _lowercase):
def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any=None , **_SCREAMING_SNAKE_CASE : int )-> List[Any]:
return super().call(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Dict = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowerCAmelCase__ : List[str] = bart_model.generate(_SCREAMING_SNAKE_CASE , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
class _a ( bart_model.model.encoder.__class__):
def UpperCAmelCase__( self : str , _SCREAMING_SNAKE_CASE : Tuple , **_SCREAMING_SNAKE_CASE : Tuple )-> List[Any]:
return super().call(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[int] = FakeEncoder(bart_model.config , bart_model.model.shared )
lowerCAmelCase__ : int = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowerCAmelCase__ : Optional[int] = bart_model.generate(_SCREAMING_SNAKE_CASE ).numpy()
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_SCREAMING_SNAKE_CASE , foo='''bar''' )
| 211 |
def lowerCamelCase_ ( _a = 4_000_000 ):
"""simple docstring"""
lowerCAmelCase__ : str = []
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(_a )
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = b, a + b
return sum(_a )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 211 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.