code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImg2ImgPipeline,
UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowercase = StableDiffusionXLImg2ImgPipeline
lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
lowercase = PipelineTesterMixin.required_optional_params - {"latents"}
lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase ( self : List[Any] ):
torch.manual_seed(0 )
snake_case__ : List[str] = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=snake_case_ , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
snake_case__ : Optional[int] = EulerDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
snake_case__ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
snake_case__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
snake_case__ : Optional[Any] = CLIPTextModel(snake_case_ )
snake_case__ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=snake_case_ )
snake_case__ : Dict = CLIPTextModelWithProjection(snake_case_ )
snake_case__ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=snake_case_ )
snake_case__ : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def lowerCamelCase ( self : Dict , snake_case_ : str , snake_case_ : Tuple=0 ):
snake_case__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
snake_case__ : Union[str, Any] = image / 2 + 0.5
if str(snake_case_ ).startswith("""mps""" ):
snake_case__ : List[str] = torch.manual_seed(snake_case_ )
else:
snake_case__ : List[Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
snake_case__ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def lowerCamelCase ( self : Dict ):
snake_case__ : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case__ : Optional[Any] = self.get_dummy_components()
snake_case__ : Any = StableDiffusionXLImg2ImgPipeline(**snake_case_ )
snake_case__ : Optional[int] = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : Optional[Any] = self.get_dummy_inputs(snake_case_ )
snake_case__ : Any = sd_pipe(**snake_case_ ).images
snake_case__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ : Dict = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase ( self : Union[str, Any] ):
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def lowerCamelCase ( self : Any ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase ( self : Any ):
pass
def lowerCamelCase ( self : List[str] ):
snake_case__ : Union[str, Any] = self.get_dummy_components()
snake_case__ : str = StableDiffusionXLImg2ImgPipeline(**snake_case_ )
snake_case__ : Optional[int] = sd_pipe.to(snake_case_ )
snake_case__ : Optional[Any] = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
# forward without prompt embeds
snake_case__ : Union[str, Any] = self.get_dummy_inputs(snake_case_ )
snake_case__ : List[Any] = 3 * ["""this is a negative prompt"""]
snake_case__ : Union[str, Any] = negative_prompt
snake_case__ : Optional[Any] = 3 * [inputs["""prompt"""]]
snake_case__ : str = sd_pipe(**snake_case_ )
snake_case__ : str = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
snake_case__ : Union[str, Any] = self.get_dummy_inputs(snake_case_ )
snake_case__ : Any = 3 * ["""this is a negative prompt"""]
snake_case__ : Any = 3 * [inputs.pop("""prompt""" )]
snake_case__ , snake_case__ , snake_case__ , snake_case__ = sd_pipe.encode_prompt(snake_case_ , negative_prompt=snake_case_ )
snake_case__ : List[str] = sd_pipe(
**snake_case_ , prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , pooled_prompt_embeds=snake_case_ , negative_pooled_prompt_embeds=snake_case_ , )
snake_case__ : Optional[int] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : Union[str, Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : Optional[int]="cpu" , snake_case_ : Dict=torch.float32 , snake_case_ : Optional[int]=0 ):
snake_case__ : Tuple = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
snake_case__ : List[Any] = np.random.RandomState(snake_case_ ).standard_normal((1, 4, 64, 64) )
snake_case__ : str = torch.from_numpy(snake_case_ ).to(device=snake_case_ , dtype=snake_case_ )
snake_case__ : List[Any] = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase ( self : List[Any] ):
snake_case__ : Optional[Any] = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : Optional[Any] = self.get_inputs(snake_case_ )
snake_case__ : List[str] = pipe(**snake_case_ ).images
snake_case__ : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ : int = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 35 |
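A side note on the seeding pattern in get_dummy_inputs above: MPS does not support device-local torch.Generator objects, so the test seeds the global CPU generator there and a device-local generator everywhere else. A minimal, self-contained sketch of that pattern (the helper name is illustrative):

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # torch.manual_seed returns the default CPU generator, which MPS pipelines accept
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)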
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Tuple=1 ) -> str:
lowerCamelCase_ = tokenizer
lowerCamelCase_ = dataset
lowerCamelCase_ = len(__SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks
lowerCamelCase_ = n_copies
def __iter__( self : Dict ) -> Any:
lowerCamelCase_ = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase_ = self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria(StoppingCriteria):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any ) -> List[Any]:
lowerCamelCase_ = start_length
lowerCamelCase_ = eof_strings
lowerCamelCase_ = tokenizer
def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]:
lowerCamelCase_ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase_ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__SCREAMING_SNAKE_CASE )
def remove_last_block(string):
    """Truncate a generated completion at the last EOF string."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
lowerCamelCase_ = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
lowerCamelCase_ = batch['ids'].shape[-1]
lowerCamelCase_ = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
lowerCamelCase_ = batch['task_id'].repeat(_lowerCamelCase )
lowerCamelCase_ = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase_ , lowerCamelCase_ = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase_ = generated_tokens.cpu().numpy()
lowerCamelCase_ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
lowerCamelCase_ = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase_ = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def main():
# Setup configuration
parser = HfArgumentParser(HumanEvalArguments)
args = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
os.environ["TOKENIZERS_PARALLELISM"] = "false"
if args.num_workers is None:
lowerCamelCase_ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
accelerator = Accelerator()
set_seed(args.seed , device_specific=True )
# Load model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase_ = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
lowerCamelCase_ = load_dataset('openai_humaneval' )
lowerCamelCase_ = load_metric('code_eval' )
lowerCamelCase_ = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase_ = args.n_samples // args.batch_size
lowerCamelCase_ = TokenizedDataset(_lowerCamelCase , human_eval['test'] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase_ = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase_ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase_ , lowerCamelCase_ = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase_ = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
lowerCamelCase_ = []
for task in tqdm(range(_lowerCamelCase ) ):
lowerCamelCase_ = human_eval['test'][task]['test']
lowerCamelCase_ = F'''check({human_eval["test"][task]["entry_point"]})'''
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase_ , lowerCamelCase_ = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(F'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 183 | 0 |
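The remove_last_block helper above truncates a completion at the final stop-string match: re.split with a capturing group keeps the separators, and dropping the last two list entries removes the final separator and its tail. A small runnable sketch (the sample completion is illustrative):

import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def remove_last_block(string: str) -> str:
    # the capturing group makes re.split keep each matched stop string
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    return "".join(string_list[:-2])

completion = "def add(a, b):\n    return a + b\nprint(add(1, 2))"
print(remove_last_block(completion))  # keeps only the function definition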
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the unique paths from (row, col) to the bottom-right cell, moving in
    the four cardinal directions without revisiting a cell or stepping on a
    blocked cell (marked 1)."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 79 |
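A quick worked example for the path counter above: on a 2x2 grid with no blocked cells there are exactly two simple paths from the top-left to the bottom-right cell, and the backtracking pair visit.add/visit.remove is what lets both be counted.

grid = [[0, 0],
        [0, 0]]
# (0,0) -> (1,0) -> (1,1) and (0,0) -> (0,1) -> (1,1)
print(depth_first_search(grid, 0, 0, set()))  # 2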
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law: P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law: V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 79 | 1 |
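As a sanity check of the formulas above (P = nRT/V and V = nRT/P), one mole of an ideal gas at 300 K in a 0.0245 m^3 vessel sits near atmospheric pressure; the printed values below are rounded:

print(pressure_of_gas_system(1, 300, 0.0245))  # ~101_810 Pa, about 1 atm
print(volume_of_gas_system(1, 300, 101_325))   # ~0.0246 m^3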
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    """Print every subsequence of the given sequence via backtracking."""
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """At each index, first branch without the element, then with it."""
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq) | 331 |
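Because each index is first skipped and then taken, the printed power set comes out with the "skip" branch ahead of the "take" branch. For a two-element input:

generate_all_subsequences([1, 2])
# prints:
# []
# [2]
# [1]
# [1, 2]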
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_x_clip"] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 264 | 0 |
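The lazy-import structure above only materializes the heavy torch modules on first attribute access. A minimal sketch of the same idea using PEP 562's module-level __getattr__ instead of the transformers _LazyModule helper (module names here are illustrative):

import importlib

_import_structure = {"configuration_x_clip": ["XCLIPConfig"]}

def __getattr__(name):
    # resolve the submodule lazily, the first time the attribute is requested
    for module_name, exported_names in _import_structure.items():
        if name in exported_names:
            submodule = importlib.import_module(f".{module_name}", __name__)
            return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")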
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main() | 365 |
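For the five-node tree built by make_tree (1 at the root, 2 and 3 as its children, 4 and 5 under 2), the traversals above give:

root = make_tree()
print(preorder(root))     # [1, 2, 4, 5, 3]
print(inorder(root))      # [4, 2, 5, 1, 3]
print(postorder(root))    # [4, 5, 2, 3, 1]
print(height(root))       # 3
print(level_order(root))  # [1, 2, 3, 4, 5]
print(zigzag(root))       # [[1], [3, 2], [4, 5]]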
'''simple docstring'''
import itertools
import math
def is_prime(number: int) -> bool:
    """Check primality with the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }") | 190 | 0 |
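solution() slices the infinite prime stream with itertools.islice, so only one prime is held in memory at a time. Two example calls (104743 is the well-known answer to Project Euler problem 7):

print(solution(6))  # 13, the 6th prime
print(solution())   # 104743, the 10001st prime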
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Tuple ='dpt'
def __init__( self, lowerCAmelCase=768, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3_072, lowerCAmelCase="gelu", lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-12, lowerCAmelCase=384, lowerCAmelCase=16, lowerCAmelCase=3, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=[2, 5, 8, 11], lowerCAmelCase="project", lowerCAmelCase=[4, 2, 1, 0.5], lowerCAmelCase=[96, 192, 384, 768], lowerCAmelCase=256, lowerCAmelCase=-1, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=0.4, lowerCAmelCase=255, lowerCAmelCase=0.1, lowerCAmelCase=[1, 1_024, 24, 24], lowerCAmelCase=[0, 1], lowerCAmelCase=None, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
lowerCamelCase_ =hidden_size
lowerCamelCase_ =is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
lowerCamelCase_ ={
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
lowerCamelCase_ =BitConfig(**lowerCAmelCase )
elif isinstance(lowerCAmelCase, lowerCAmelCase ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
lowerCamelCase_ =BitConfig(**lowerCAmelCase )
elif isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
lowerCamelCase_ =backbone_featmap_shape
lowerCamelCase_ =neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =[]
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_act
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =initializer_range
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =image_size
lowerCamelCase_ =patch_size
lowerCamelCase_ =num_channels
lowerCamelCase_ =qkv_bias
lowerCamelCase_ =backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
lowerCamelCase_ =readout_type
lowerCamelCase_ =reassemble_factors
lowerCamelCase_ =neck_hidden_sizes
lowerCamelCase_ =fusion_hidden_size
lowerCamelCase_ =head_in_index
lowerCamelCase_ =use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
lowerCamelCase_ =use_auxiliary_head
lowerCamelCase_ =auxiliary_loss_weight
lowerCamelCase_ =semantic_loss_ignore_index
lowerCamelCase_ =semantic_classifier_dropout
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCamelCase_ =self.backbone_config.to_dict()
lowerCamelCase_ =self.__class__.model_type
return output
| 75 |
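A short sketch of how the two branches of the DPT config above are exercised; the class and argument names follow the public transformers API, and the values are illustrative:

from transformers import DPTConfig

config = DPTConfig(is_hybrid=False)           # plain ViT backbone
hybrid = DPTConfig(is_hybrid=True)            # falls back to a default BiT backbone
print(type(hybrid.backbone_config).__name__)  # BitConfig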
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
UpperCAmelCase_ : List[Any] =["image_processor", "tokenizer"]
UpperCAmelCase_ : str ="AutoImageProcessor"
UpperCAmelCase_ : Any ="AutoTokenizer"
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ):
lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _lowerCamelCase , )
lowercase = kwargs.pop('feature_extractor' )
lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_lowerCamelCase , _lowerCamelCase )
lowercase = self.image_processor
lowercase = False
def __call__( self , *_lowerCamelCase , **_lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowerCamelCase , **_lowerCamelCase )
lowercase = kwargs.pop('images' , _lowerCamelCase )
lowercase = kwargs.pop('text' , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowercase = args[0]
lowercase = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
lowercase = self.image_processor(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
if text is not None:
lowercase = self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowercase = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def UpperCamelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@contextmanager
def UpperCamelCase_ ( self ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
lowercase = True
lowercase = self.tokenizer
yield
lowercase = self.image_processor
lowercase = False
def token2json(self, tokens, is_inner_value=False, added_vocab=None):
if added_vocab is None:
lowercase = self.tokenizer.get_added_vocab()
lowercase = {}
while tokens:
lowercase = re.search(R'<s_(.*?)>' , _lowerCamelCase , re.IGNORECASE )
if start_token is None:
break
lowercase = start_token.group(1 )
lowercase = re.search(RF'</s_{key}>' , _lowerCamelCase , re.IGNORECASE )
lowercase = start_token.group()
if end_token is None:
lowercase = tokens.replace(_lowerCamelCase , '' )
else:
lowercase = end_token.group()
lowercase = re.escape(_lowerCamelCase )
lowercase = re.escape(_lowerCamelCase )
lowercase = re.search(F'{start_token_escaped}(.*?){end_token_escaped}' , _lowerCamelCase , re.IGNORECASE )
if content is not None:
lowercase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
if value:
if len(_lowerCamelCase ) == 1:
lowercase = value[0]
lowercase = value
else: # leaf nodes
lowercase = []
for leaf in content.split(R'<sep/>' ):
lowercase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowercase = leaf[1:-2] # for categorical special tokens
output[key].append(_lowerCamelCase )
if len(output[key] ) == 1:
lowercase = output[key][0]
lowercase = tokens[tokens.find(_lowerCamelCase ) + len(_lowerCamelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
if len(_lowerCamelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def UpperCamelCase_ ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _lowerCamelCase , )
return self.image_processor_class
@property
def UpperCamelCase_ ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _lowerCamelCase , )
return self.image_processor
| 220 | 0 |
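token2json above walks Donut-style tag pairs into nested Python objects. A tiny regex sketch of its first step, finding a start tag, its matching end tag, and the content in between (the sample token string is illustrative):

import re

tokens = "<s_menu><s_name>Latte</s_name></s_menu>"
start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
key = start_token.group(1)  # "menu"
end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
content = re.search(
    rf"{re.escape(start_token.group())}(.*?){re.escape(end_token.group())}",
    tokens,
    re.IGNORECASE,
)
print(key, "->", content.group(1))  # menu -> <s_name>Latte</s_name>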
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
"""jukebox""": 5_12,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_LYRIC_TOKENS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=["v3", "v2", "v2"] , snake_case__=512 , snake_case__=5 , snake_case__="<|endoftext|>" , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Tuple = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token
super().__init__(
unk_token=snake_case__ , n_genres=snake_case__ , version=snake_case__ , max_n_lyric_tokens=snake_case__ , **snake_case__ , )
_lowerCAmelCase : Tuple = version
_lowerCAmelCase : Optional[int] = max_n_lyric_tokens
_lowerCAmelCase : Tuple = n_genres
with open(snake_case__ , encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : List[str] = json.load(snake_case__ )
with open(snake_case__ , encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : List[Any] = json.load(snake_case__ )
with open(snake_case__ , encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[Any] = json.load(snake_case__ )
_lowerCAmelCase : Any = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
_lowerCAmelCase : Union[str, Any] = oov.replace(R'\-\'' , R'\-+\'' )
_lowerCAmelCase : Optional[int] = regex.compile(snake_case__ )
_lowerCAmelCase : Tuple = {v: k for k, v in self.artists_encoder.items()}
_lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.genres_encoder.items()}
_lowerCAmelCase : Optional[Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def a ( self ):
'''simple docstring'''
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def a ( self ):
'''simple docstring'''
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def a ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = [self.artists_encoder.get(snake_case__ , 0 ) for artist in list_artists]
for genres in range(len(snake_case__ ) ):
_lowerCAmelCase : List[str] = [self.genres_encoder.get(snake_case__ , 0 ) for genre in list_genres[genres]]
_lowerCAmelCase : Any = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
_lowerCAmelCase : List[str] = [[self.lyrics_encoder.get(snake_case__ , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def a ( self , snake_case__ ):
'''simple docstring'''
return list(snake_case__ )
def a ( self , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.prepare_for_tokenization(snake_case__ , snake_case__ , snake_case__ )
_lowerCAmelCase : str = self._tokenize(snake_case__ )
return artist, genre, lyrics
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = False ):
'''simple docstring'''
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
_lowerCAmelCase : int = artists[idx].lower()
_lowerCAmelCase : Tuple = [genres[idx].lower()]
else:
_lowerCAmelCase : Optional[int] = self._normalize(artists[idx] ) + '.v2'
_lowerCAmelCase : str = [
self._normalize(snake_case__ ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
_lowerCAmelCase : Tuple = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
_lowerCAmelCase : str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
_lowerCAmelCase : int = {vocab[index]: index + 1 for index in range(len(snake_case__ ) )}
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Optional[Any] = len(snake_case__ ) + 1
_lowerCAmelCase : List[str] = self.vocab
_lowerCAmelCase : Any = {v: k for k, v in self.vocab.items()}
_lowerCAmelCase : List[Any] = ''
else:
_lowerCAmelCase : List[str] = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
_lowerCAmelCase : List[str] = self._run_strip_accents(snake_case__ )
_lowerCAmelCase : Optional[int] = lyrics.replace('\\' , '\n' )
_lowerCAmelCase : Tuple = self.out_of_vocab.sub('' , snake_case__ ), [], []
return artists, genres, lyrics
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[str] = unicodedata.normalize('NFD' , snake_case__ )
_lowerCAmelCase : Optional[Any] = []
for char in text:
_lowerCAmelCase : Dict = unicodedata.category(snake_case__ )
if cat == "Mn":
continue
output.append(snake_case__ )
return "".join(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = (
[chr(snake_case__ ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(snake_case__ ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(snake_case__ ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
_lowerCAmelCase : List[str] = frozenset(snake_case__ )
_lowerCAmelCase : List[str] = re.compile(R'_+' )
_lowerCAmelCase : Optional[Any] = ''.join([c if c in accepted else '_' for c in text.lower()] )
_lowerCAmelCase : Optional[int] = pattern.sub('_' , snake_case__ ).strip('_' )
return text
def a ( self , snake_case__ ):
'''simple docstring'''
return " ".join(snake_case__ )
def a ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
_lowerCAmelCase : Dict = TensorType(snake_case__ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
_lowerCAmelCase : List[str] = tf.constant
_lowerCAmelCase : str = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
_lowerCAmelCase : Union[str, Any] = torch.tensor
_lowerCAmelCase : int = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
_lowerCAmelCase : int = jnp.array
_lowerCAmelCase : Optional[Any] = _is_jax
else:
_lowerCAmelCase : List[Any] = np.asarray
_lowerCAmelCase : List[Any] = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
_lowerCAmelCase : Dict = [inputs]
if not is_tensor(snake_case__ ):
_lowerCAmelCase : Tuple = as_tensor(snake_case__ )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self , snake_case__ , snake_case__ , snake_case__="" , snake_case__="pt" ):
'''simple docstring'''
_lowerCAmelCase : Dict = [0, 0, 0]
_lowerCAmelCase : List[Any] = [artist] * len(self.version )
_lowerCAmelCase : Tuple = [genres] * len(self.version )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.tokenize(snake_case__ , snake_case__ , snake_case__ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = self._convert_token_to_id(snake_case__ , snake_case__ , snake_case__ )
_lowerCAmelCase : Optional[Any] = [-INFINITY] * len(full_tokens[-1] )
_lowerCAmelCase : List[str] = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=snake_case__ )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCAmelCase : Tuple = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=snake_case__ ) )
_lowerCAmelCase : Dict = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=snake_case__ ) )
_lowerCAmelCase : List[str] = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=snake_case__ ) )
return (artists_file, genres_file, lyrics_file)
def a ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.artists_decoder.get(snake_case__ )
_lowerCAmelCase : Optional[int] = [self.genres_decoder.get(snake_case__ ) for genre in genres_index]
_lowerCAmelCase : Any = [self.lyrics_decoder.get(snake_case__ ) for character in lyric_index]
return artist, genres, lyrics
| 25 |
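The _normalize helper above reduces artist and genre names to lowercase alphanumerics joined by single underscores. A self-contained sketch of the same transformation:

import re

def normalize(text: str) -> str:
    accepted = frozenset(
        [chr(i) for i in range(ord("a"), ord("z") + 1)]
        + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
        + [chr(i) for i in range(ord("0"), ord("9") + 1)]
        + ["."]
    )
    # lowercase, replace anything unaccepted with "_", then collapse runs of "_"
    text = "".join(c if c in accepted else "_" for c in text.lower())
    return re.sub(r"_+", "_", text).strip("_")

print(normalize("The Beatles!"))  # the_beatles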
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "nat"
__magic_name__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = patch_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : Any = depths
_lowerCAmelCase : Dict = len(snake_case__ )
_lowerCAmelCase : str = num_heads
_lowerCAmelCase : Dict = kernel_size
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : int = qkv_bias
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Dict = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
_lowerCAmelCase : Any = layer_scale_init_value
_lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )]
_lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices(
out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
| 25 | 1 |
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Optional[int] = 1
lowerCamelCase : List[str] = 2
lowerCamelCase : str = 3
lowerCamelCase : Any = 4
lowerCamelCase : Dict = 5
@dataclass
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : jnp.ndarray
class UpperCAmelCase_ :
lowerCamelCase : Any = SCHEDULER_CONFIG_NAME
lowerCamelCase : str = ['''dtype''']
lowerCamelCase : int = []
lowerCamelCase : Dict = True
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , UpperCAmelCase__ : Dict[str, Any] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : int=False , **UpperCAmelCase__ : str , ) -> List[Any]:
lowerCAmelCase , lowerCAmelCase = cls.load_config(
pretrained_model_name_or_path=UpperCAmelCase__ , subfolder=UpperCAmelCase__ , return_unused_kwargs=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase , lowerCAmelCase = cls.from_config(UpperCAmelCase__ , return_unused_kwargs=UpperCAmelCase__ , **UpperCAmelCase__ )
if hasattr(UpperCAmelCase__ , 'create_state' ) and getattr(UpperCAmelCase__ , 'has_state' , UpperCAmelCase__ ):
lowerCAmelCase = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : Union[str, os.PathLike] , UpperCAmelCase__ : bool = False , **UpperCAmelCase__ : Optional[int] ) -> List[Any]:
self.save_config(save_directory=UpperCAmelCase__ , push_to_hub=UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
return self._get_compatibles()
@classmethod
def __UpperCAmelCase ( cls : List[str] ) -> List[str]:
lowerCAmelCase = list(set([cls.__name__] + cls._compatibles ) )
lowerCAmelCase = importlib.import_module(__name__.split('.' )[0] )
lowerCAmelCase = [
getattr(UpperCAmelCase__ , UpperCAmelCase__ ) for c in compatible_classes_str if hasattr(UpperCAmelCase__ , UpperCAmelCase__ )
]
return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta: float = 0.999, dtype=jnp.float32):
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , UpperCAmelCase__ : Tuple ) -> List[str]:
lowerCAmelCase = scheduler.config
if config.trained_betas is not None:
lowerCAmelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowerCAmelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
lowerCAmelCase = 1.0 - betas
lowerCAmelCase = jnp.cumprod(UpperCAmelCase__ , axis=0 )
return cls(
alphas=UpperCAmelCase__ , betas=UpperCAmelCase__ , alphas_cumprod=UpperCAmelCase__ , )
def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
lowerCAmelCase = state.alphas_cumprod
lowerCAmelCase = alphas_cumprod[timesteps] ** 0.5
lowerCAmelCase = sqrt_alpha_prod.flatten()
lowerCAmelCase = broadcast_to_shape_from_left(lowerCamelCase , original_samples.shape )
lowerCAmelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
lowerCAmelCase = sqrt_one_minus_alpha_prod.flatten()
lowerCAmelCase = broadcast_to_shape_from_left(lowerCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
lowerCAmelCase , lowerCAmelCase = get_sqrt_alpha_prod(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCAmelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
lowerCAmelCase , lowerCAmelCase = get_sqrt_alpha_prod(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCAmelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 4 |
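betas_for_alpha_bar above discretizes the Glide cosine noise schedule: beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta. The same computation in plain Python, without jax:

import math

def alpha_bar(t: float) -> float:
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

def cosine_betas(num_steps: int, max_beta: float = 0.999) -> list[float]:
    return [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]

betas = cosine_betas(1000)
print(round(betas[0], 6), betas[-1])  # tiny at t=0, clipped to 0.999 at the end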
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def _compute(self, predictions, references, max_order=4, smooth=False):
score = compute_bleu(
reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth)
(bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 4 | 1 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})
@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class __lowerCAmelCase :
UpperCamelCase__ = None
UpperCamelCase__ = field(default=__magic_name__ , metadata={'''help''': '''help message'''} )
UpperCamelCase__ = None
UpperCamelCase__ = list_field(default=[] )
UpperCamelCase__ = list_field(default=[] )
@dataclass
class __lowerCAmelCase :
UpperCamelCase__ = list_field(default=[] )
UpperCamelCase__ = list_field(default=[1, 2, 3] )
UpperCamelCase__ = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
UpperCamelCase__ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class __lowerCAmelCase :
UpperCamelCase__ = field()
UpperCamelCase__ = field()
UpperCamelCase__ = field()
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = BasicEnum(self.required_enum )
@dataclass
class __lowerCAmelCase :
UpperCamelCase__ = 42
UpperCamelCase__ = field()
UpperCamelCase__ = None
UpperCamelCase__ = field(default='''toto''' , metadata={'''help''': '''help message'''} )
UpperCamelCase__ = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
if is_python_no_less_than_3_10:
@dataclass
class __lowerCAmelCase :
UpperCamelCase__ = False
UpperCamelCase__ = True
UpperCamelCase__ = None
@dataclass
class __lowerCAmelCase :
UpperCamelCase__ = None
UpperCamelCase__ = field(default=__magic_name__ , metadata={'''help''': '''help message'''} )
UpperCamelCase__ = None
UpperCamelCase__ = list_field(default=[] )
UpperCamelCase__ = list_field(default=[] )
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
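# A short standalone sketch of the pattern these tests exercise: declare a dataclass,
# hand it to HfArgumentParser, and parse CLI-style arguments into an instance
# (the names below are illustrative only):
if __name__ == "__main__":

    @dataclass
    class DemoArguments:
        foo: int = 1
        baz: str = field(default="toto", metadata={"help": "help message"})

    demo_parser = HfArgumentParser(DemoArguments)
    (demo_args,) = demo_parser.parse_args_into_dataclasses(["--foo", "3"], look_for_args_file=False)
    print(demo_args)  # DemoArguments(foo=3, baz='toto')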
| 356 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
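# Usage sketch: a config is a plain container, and `hidden_size` is derived from
# `embed_dim` and the number of stages (the values below mirror the defaults):
if __name__ == "__main__":
    config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5])
    print(config.hidden_size)  # 64 * 2 ** 3 = 512
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']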
| 347 | 0 |
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
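# Example invocation, reusing the coordinates from the repository's haversine example
# (the coordinates and the ~254 km figure are assumptions, not taken from this file):
#   SAN_FRANCISCO = (37.774856, -122.424227)
#   YOSEMITE = (37.864742, -119.537521)
#   lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE)  # roughly 254 km, returned in metres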
| 62 |
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    # Creates a state space tree to iterate through each branch using DFS;
    # it terminates when it reaches the end of the given sequence.
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
| 117 | 0 |
"""simple docstring"""
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
| 302 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 302 | 1 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter

from transformers import HfArgumentParser


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

                title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
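# Invocation sketch (flag names come from PlotArguments above; the csv/png file names
# are placeholders):
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png
#   python plot_csv_file.py --csv_file results.csv --is_time --plot_along_batch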
| 253 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 253 | 1 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    # A tuple padding value (e.g. (-1, -1)) means each item is a (start, end) pair.
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, -len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length]) :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
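# Small illustration of `padding_tensor` with arbitrary values: a scalar pad value
# pads flat sequences, a tuple pad value pads (start, end) span pairs.
if __name__ == "__main__":
    print(padding_tensor([[1, 2], [3]], -1, "right", 4))
    # [[1, 2, -1, -1], [3, -1, -1, -1]]
    print(padding_tensor([[(0, 1)], [(2, 3), (4, 5)]], (-1, -1), "right", 3))
    # [[[0, 1], [-1, -1], [-1, -1]], [[2, 3], [4, 5], [-1, -1]]]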
| 362 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
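# Usage sketch, assuming the class names restored above: the derived layer count and
# the ONNX input spec can be inspected without loading any weights.
if __name__ == "__main__":
    config = EfficientNetConfig()
    print(config.num_hidden_layers)  # sum([1, 2, 2, 3, 3, 4, 1]) * 4 = 64
    print(EfficientNetOnnxConfig(config).inputs)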
| 213 | 0 |
'''simple docstring'''
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 75 |
"""simple docstring"""
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process)
    waiting_time = calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process)

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )

    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
| 160 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/m2m100_418M": 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
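# Translation usage sketch (the checkpoint id comes from the maps above; downloading
# the vocab/sentencepiece files is required, so this is left as comments):
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   model_inputs = tokenizer("Hello world", return_tensors="pt")
#   # pair with M2M100ForConditionalGeneration and
#   # forced_bos_token_id=tokenizer.get_lang_id("fr") to generate the French output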
| 114 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 114 | 1 |
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    # encode the utf-8 input to bytes, then base85-encode it
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(b85encoded: bytes) -> str:
    # base85-decode the input and return the decoded utf-8 string
    return base64.b85decode(b85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)

    decoded = base85_decode(encoded)
    print(decoded)
| 35 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = '''google/pegasus-xsum'''
@cached_property
    def tokenizer( self ):
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model( self ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected( self , **tokenizer_kwargs ):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        assert self.expected_text == generated_words
    def translate_src_text( self , **tokenizer_kwargs ):
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , padding=True , return_tensors="""tf""" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )
        return generated_words
@slow
    def test_batch_generation_xsum( self ):
        self._assert_generated_batch_equal_expected()
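# --- Illustrative usage sketch (not part of the tests above): what the batch
# generation check exercises end to end. The model id comes from the tests;
# the input text and generation settings here are assumptions.
def _demo_pegasus_xsum():
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
    tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
    model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
    batch = tokenizer(
        ["PG&E scheduled the blackouts in response to forecasts for high winds."],
        padding=True, return_tensors="tf",
    )
    summary_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
    print(tokenizer.batch_decode(summary_ids.numpy(), skip_special_tokens=True))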
| 349 | 0 |
from sklearn.metrics import mean_squared_error
import datasets
__UpperCAmelCase = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
__UpperCAmelCase = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
__UpperCAmelCase = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types( self ):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        """simple docstring"""
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
return {"mse": mse}
| 139 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config , decoder_config ):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm1.weight''', f'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm1.bias''', f'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.weight''', f'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.bias''', f'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm2.weight''', f'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm2.bias''', f'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.weight''', f'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.bias''', f'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc2.weight''', f'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.mlp.fc2.bias''', f'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict , encoder_config ):
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img(checkpoint_url ):
    if "handwritten" in checkpoint_url:
        url = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"""  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
    encoder_config = ViTConfig(image_size=384 , qkv_bias=False )
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = """relu"""
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config , add_pooling_layer=False )
    decoder = TrOCRForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" , check_hash=True )["""model"""]
    rename_keys = create_rename_keys(encoder_config , decoder_config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , encoder_config )
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("""decoder""" ) and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size )
    tokenizer = RobertaTokenizer.from_pretrained("""roberta-large""" )
    processor = TrOCRProcessor(image_processor , tokenizer )
    pixel_values = processor(images=prepare_img(checkpoint_url ) , return_tensors="""pt""" ).pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    outputs = model(pixel_values=pixel_values , decoder_input_ids=decoder_input_ids )
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 50265] )
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10] , expected_slice , atol=1e-3 ), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving processor to {pytorch_dump_folder_path}''' )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
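# Example invocation (the checkpoint URL is the script's own default; the
# script filename and output path below are assumptions):
#
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten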
| 139 | 1 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class FlaxLogitsProcessor:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores ) -> jnp.ndarray:
        '''simple docstring'''
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class FlaxLogitsWarper:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores ) -> jnp.ndarray:
        '''simple docstring'''
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class FlaxLogitsProcessorList(list ):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores , cur_len , **kwargs ) -> jnp.ndarray:
        '''simple docstring'''
        for processor in self:
            function_args = inspect.signature(processor.__call__ ).parameters
            if len(function_args ) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        F"Make sure that all the required parameters: {list(function_args.keys() )} for "
                        F"{processor.__class__} are passed to the logits processor." )
                scores = processor(input_ids , scores , cur_len , **kwargs )
            else:
                scores = processor(input_ids , scores , cur_len )
return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper ):
    def __init__( self , temperature ) -> None:
        '''simple docstring'''
        if not isinstance(temperature , float ) or not (temperature > 0):
            raise ValueError(F"`temperature` has to be a strictly positive float, but is {temperature}" )
        self.temperature = temperature
    def __call__( self , input_ids , scores , cur_len ) -> jnp.ndarray:
        '''simple docstring'''
        scores = scores / self.temperature
return scores
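# Quick numerical illustration of the temperature warper above: dividing the
# logits by T < 1 sharpens the softmax, while T > 1 flattens it. The values
# below are an assumed toy example.
def _demo_temperature():
    logits = jnp.array([[2.0, 1.0, 0.0]])
    print(jax.nn.softmax(logits / 0.5))  # peakier than softmax(logits)
    print(jax.nn.softmax(logits / 2.0))  # closer to uniform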
class FlaxTopPLogitsWarper(FlaxLogitsWarper ):
    def __init__( self , top_p , filter_value = -float("""Inf""" ) , min_tokens_to_keep = 1 ) -> None:
        '''simple docstring'''
        if not isinstance(top_p , float ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(F"`top_p` has to be a float > 0 and < 1, but is {top_p}" )
        if not isinstance(min_tokens_to_keep , int ) or (min_tokens_to_keep < 1):
            raise ValueError(F"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}" )
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep
    def __call__( self , input_ids , scores , cur_len ) -> jnp.ndarray:
        '''simple docstring'''
        topk_scores , topk_indices = lax.top_k(scores , scores.shape[-1] )
        mask_scores = jnp.full_like(scores , self.filter_value )
        cumulative_probs = jax.nn.softmax(topk_scores , axis=-1 ).cumsum(axis=-1 )
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask , 1 )
        score_mask |= score_mask.at[:, 0].set(True )
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True )
        topk_next_scores = jnp.where(score_mask , topk_scores , mask_scores )
        next_scores = jax.lax.sort_key_val(topk_indices , topk_next_scores )[-1]
return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper ):
    def __init__( self , top_k , filter_value = -float("""Inf""" ) , min_tokens_to_keep = 1 ) -> None:
        '''simple docstring'''
        if not isinstance(top_k , int ) or top_k <= 0:
            raise ValueError(F"`top_k` has to be a strictly positive integer, but is {top_k}" )
        self.top_k = max(top_k , min_tokens_to_keep )
        self.filter_value = filter_value
    def __call__( self , input_ids , scores , cur_len ) -> jnp.ndarray:
        '''simple docstring'''
        batch_size , vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size , self.filter_value )
        topk = min(self.top_k , scores.shape[-1] )  # Safety check
        topk_scores , topk_indices = lax.top_k(scores , topk )
        shift = jnp.broadcast_to((jnp.arange(batch_size ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat )
        next_scores = next_scores_flat.reshape(batch_size , vocab_size )
return next_scores
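# Toy illustration of the top-k filtering implemented above: only the k
# largest scores survive, everything else drops to the filter value.
def _demo_top_k():
    scores = jnp.array([[0.1, 0.4, 0.2, 0.3]])
    topk_scores, topk_indices = lax.top_k(scores, 2)
    filtered = jnp.full_like(scores, -float("inf")).at[0, topk_indices[0]].set(topk_scores[0])
    print(filtered)  # [[-inf, 0.4, -inf, 0.3]]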
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor ):
    def __init__( self , bos_token_id ) -> None:
        '''simple docstring'''
        self.bos_token_id = bos_token_id
    def __call__( self , input_ids , scores , cur_len ) -> jnp.ndarray:
        '''simple docstring'''
        new_scores = jnp.full(scores.shape , -float("""inf""" ) )
        apply_penalty = 1 - jnp.bool_(cur_len - 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.bos_token_id].set(0 ) , scores )
return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor ):
    def __init__( self , max_length , eos_token_id ) -> None:
        '''simple docstring'''
        self.max_length = max_length
        self.eos_token_id = eos_token_id
    def __call__( self , input_ids , scores , cur_len ) -> jnp.ndarray:
        '''simple docstring'''
        new_scores = jnp.full(scores.shape , -float("""inf""" ) )
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.eos_token_id].set(0 ) , scores )
return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor ):
    def __init__( self , min_length , eos_token_id ) -> None:
        '''simple docstring'''
        if not isinstance(min_length , int ) or min_length < 0:
            raise ValueError(F"`min_length` has to be a positive integer, but is {min_length}" )
        if not isinstance(eos_token_id , int ) or eos_token_id < 0:
            raise ValueError(F"`eos_token_id` has to be a positive integer, but is {eos_token_id}" )
        self.min_length = min_length
        self.eos_token_id = eos_token_id
    def __call__( self , input_ids , scores , cur_len ) -> jnp.ndarray:
        '''simple docstring'''
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
        scores = jnp.where(apply_penalty , scores.at[:, self.eos_token_id].set(-float("""inf""" ) ) , scores )
return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor ):
    def __init__( self , begin_suppress_tokens , begin_index ) -> None:
        '''simple docstring'''
        self.begin_suppress_tokens = list(begin_suppress_tokens )
        self.begin_index = begin_index
    def __call__( self , input_ids , scores , cur_len ) -> jnp.ndarray:
        '''simple docstring'''
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index )
        scores = jnp.where(apply_penalty , scores.at[:, self.begin_suppress_tokens].set(-float("""inf""" ) ) , scores )
return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor ):
    def __init__( self , suppress_tokens ) -> None:
        '''simple docstring'''
        self.suppress_tokens = list(suppress_tokens )
    def __call__( self , input_ids , scores , cur_len ) -> jnp.ndarray:
        '''simple docstring'''
        scores = scores.at[..., self.suppress_tokens].set(-float("""inf""" ) )
return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor ):
    def __init__( self , force_token_map ) -> None:
        '''simple docstring'''
        force_token_map = dict(force_token_map )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token )
        self.force_token_array = jnp.int32(force_token_array )
    def __call__( self , input_ids , scores , cur_len ) -> jnp.ndarray:
        '''simple docstring'''
        def _force_token(generation_idx ):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores , dtype=scores.dtype ) * -float("""inf""" )
            updates = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
            new_scores = lax.dynamic_update_slice(new_scores , updates , (0, current_token) )
            return new_scores
        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
                self.force_token_array[cur_len] >= 0 , lambda: _force_token(cur_len ) , lambda: scores , ) , )
return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor ):
    def __init__( self , generate_config , model_config , decoder_input_length ) -> None:
        '''simple docstring'''
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config , """max_initial_timestamp_index""" ):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size
    def __call__( self , input_ids , scores , cur_len ) -> jnp.ndarray:
        '''simple docstring'''
        # suppress <|notimestamps|>, which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("""inf""" ) )
        def handle_pairs(input_ids_k , scores_k ):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1 , True , False )
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , False , )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2 , True , False )
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin , True , penultimate_was_timestamp , )
            return jnp.where(
                last_was_timestamp , jnp.where(
                    penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("""inf""" ) ) , scores_k.at[: self.eos_token_id].set(-float("""inf""" ) ) , ) , scores_k , )
        scores = jax.vmap(handle_pairs )(input_ids , scores )
        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index , True , False )
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , False , )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp , scores.at[:, last_allowed + 1 :].set(-float("""inf""" ) ) , scores , )
# if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores , axis=-1 )
        def handle_cumulative_probs(logprobs_k , scores_k ):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("""inf""" ) ) , scores_k , )
        scores = jax.vmap(handle_cumulative_probs )(logprobs , scores )
return scores
| 109 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig ):
    model_type = '''big_bird'''
    def __init__( self , vocab_size=50358 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=4096 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , sep_token_id=66 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=64 , num_random_blocks=3 , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig ):
    @property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
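# Minimal usage sketch (illustrative; it imports the released transformers
# class rather than relying on the module above): configure BigBird's
# block-sparse attention and inspect the fields documented in __init__.
def _demo_bigbird_config():
    from transformers import BigBirdConfig
    config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
    print(config.attention_type, config.block_size, config.num_random_blocks)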
| 273 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPixaPixPipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
    def get_dummy_inputs(self , device , seed=0 ):
        """simple docstring"""
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("RGB" )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def UpperCamelCase_ (self ):
"""simple docstring"""
a = "cpu" # ensure determinism for the device-dependent torch.Generator
a = self.get_dummy_components()
a = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase_ )
a = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a = self.get_dummy_inputs(lowerCamelCase_ )
a = sd_pipe(**lowerCamelCase_ ).images
a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase_ (self ):
"""simple docstring"""
a = "cpu" # ensure determinism for the device-dependent torch.Generator
a = self.get_dummy_components()
a = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase_ )
a = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a = self.get_dummy_inputs(lowerCamelCase_ )
a = "french fries"
a = sd_pipe(**lowerCamelCase_ , negative_prompt=lowerCamelCase_ )
a = output.images
a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self ):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["prompt"] = [inputs["prompt"]] * 2
        image = np.array(inputs["image"] ).astype(np.float32 ) / 255.0
        image = torch.from_numpy(image ).unsqueeze(0 ).to(device )
        image = image / 2 + 0.5
        image = image.permute(0 , 3 , 1 , 2 )
        inputs["image"] = image.repeat(2 , 1 , 1 , 1 )
        image = sd_pipe(**inputs ).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_stable_diffusion_pix2pix_euler(self ):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" )
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        slice = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(",".join([str(x ) for x in slice] ) )
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase_ (self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_dummy_components()
a = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase_ )
a = VaeImageProcessor(do_resize=lowerCamelCase_ , do_normalize=lowerCamelCase_ )
a = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a = pipe(**self.get_dummy_inputs_by_type(lowerCamelCase_ , input_image_type="pt" ) )[0]
a = components["vae"]
a = self.get_dummy_inputs_by_type(lowerCamelCase_ , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
a = vae.encode(inputs[image_param] ).latent_dist.mode()
a = pipe(**lowerCamelCase_ )[0]
a = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCamelCase_ , 1E-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class StableDiffusionInstructPixaPixPipelineSlowTests(unittest.TestCase ):
    """simple docstring"""
    def tearDown(self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self , seed=0 ):
        """simple docstring"""
        generator = torch.manual_seed(seed )
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
        inputs = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def UpperCamelCase_ (self ):
"""simple docstring"""
a = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
a = self.get_inputs()
a = pipe(**lowerCamelCase_ ).images
a = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
a = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCamelCase_ (self ):
"""simple docstring"""
a = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=lowerCamelCase_ )
a = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
a = self.get_inputs()
a = pipe(**lowerCamelCase_ ).images
a = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
a = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCamelCase_ (self ):
"""simple docstring"""
a = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=lowerCamelCase_ )
a = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
a = self.get_inputs()
a = pipe(**lowerCamelCase_ ).images
a = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
a = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
    def test_stable_diffusion_pix2pix_intermediate_state(self ):
        """simple docstring"""
        number_of_steps = 0
        def callback_fn(step , timestep , latents ) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
        callback_fn.has_been_called = False
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix" , safety_checker=None , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs , callback=callback_fn , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3
def UpperCamelCase_ (self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=lowerCamelCase_ , torch_dtype=torch.floataa )
a = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
a = self.get_inputs()
a = pipe(**lowerCamelCase_ )
a = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
a = inputs["image"].resize((504, 504) )
a = "timbrooks/instruct-pix2pix"
a = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCamelCase_ , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
a = pipe(**lowerCamelCase_ )
a = output.images[0]
a = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
a = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
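# End-to-end usage sketch for the pipeline exercised above. The model id and
# example image come from the tests themselves; the prompt and step count are
# assumptions, and the released diffusers class name is used.
def _demo_instruct_pix2pix():
    from diffusers import StableDiffusionInstructPix2PixPipeline
    pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
        "timbrooks/instruct-pix2pix" , safety_checker=None , torch_dtype=torch.float16
    ).to("cuda")
    image = load_image(
        "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
    edited = pipe(
        "turn him into a cyborg" , image=image , num_inference_steps=20 , image_guidance_scale=1.0
    ).images[0]
    edited.save("cyborg.png")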
| 71 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    """simple docstring"""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
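# Sanity check of the cosine schedule above (toy timestep count): betas stay
# in (0, max_beta] and the cumulative alphas decay monotonically.
def _check_betas_for_alpha_bar():
    betas = betas_for_alpha_bar(10 )
    alphas_cumprod = torch.cumprod(1.0 - betas , dim=0 )
    assert betas.min() > 0 and betas.max() <= 0.999
    assert bool((alphas_cumprod[1:] <= alphas_cumprod[:-1]).all() )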
class KDPM2DiscreteScheduler(SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(self , num_train_timesteps = 1000 , beta_start = 0.0_0085 , beta_end = 0.012 , beta_schedule = "linear" , trained_betas = None , prediction_type = "epsilon" , timestep_spacing = "linspace" , steps_offset = 0 , ):
        """simple docstring"""
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps )
        else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
    def index_for_timestep(self , timestep , schedule_timesteps=None ):
        """simple docstring"""
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , ):
"""simple docstring"""
a = self.index_for_timestep(lowerCamelCase_ )
if self.state_in_first_order:
a = self.sigmas[step_index]
else:
a = self.sigmas_interpol[step_index]
a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ):
"""simple docstring"""
a = num_inference_steps
a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
a = np.linspace(0 , num_train_timesteps - 1 , lowerCamelCase_ , dtype=lowerCamelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
a = (np.arange(0 , lowerCamelCase_ ) * step_ratio).round()[::-1].copy().astype(lowerCamelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
a = (np.arange(lowerCamelCase_ , 0 , -step_ratio )).round().copy().astype(lowerCamelCase_ )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
a = torch.from_numpy(np.log(lowerCamelCase_ ) ).to(lowerCamelCase_ )
a = np.interp(lowerCamelCase_ , np.arange(0 , len(lowerCamelCase_ ) ) , lowerCamelCase_ )
a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
a = torch.from_numpy(lowerCamelCase_ ).to(device=lowerCamelCase_ )
# interpolate sigmas
a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(lowerCamelCase_ ).startswith("mps" ):
# mps does not support float64
a = torch.from_numpy(lowerCamelCase_ ).to(lowerCamelCase_ , dtype=torch.floataa )
else:
a = torch.from_numpy(lowerCamelCase_ ).to(lowerCamelCase_ )
# interpolate timesteps
a = self.sigma_to_t(lowerCamelCase_ ).to(lowerCamelCase_ , dtype=timesteps.dtype )
a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
a = torch.cat([timesteps[:1], interleaved_timesteps] )
a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
a = defaultdict(lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = sigma.log()
# get distribution
a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
a = low_idx + 1
a = self.log_sigmas[low_idx]
a = self.log_sigmas[high_idx]
# interpolate sigmas
a = (low - log_sigma) / (low - high)
a = w.clamp(0 , 1 )
# transform interpolation to time range
a = (1 - w) * low_idx + w * high_idx
a = t.view(sigma.shape )
return t
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return self.sample is None
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = True , ):
"""simple docstring"""
a = self.index_for_timestep(lowerCamelCase_ )
# advance index counter by 1
a = timestep.cpu().item() if torch.is_tensor(lowerCamelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
a = self.sigmas[step_index]
a = self.sigmas_interpol[step_index + 1]
a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
a = self.sigmas[step_index - 1]
a = self.sigmas_interpol[step_index]
a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
a = 0
a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
a = sigma_hat if self.state_in_first_order else sigma_interpol
a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
a = sigma_hat if self.state_in_first_order else sigma_interpol
a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
a = sigma_interpol - sigma_hat
# store for 2nd order step
a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
a = sigma_next - sigma_hat
a = self.sample
a = None
a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ):
"""simple docstring"""
a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase_ ):
# mps does not support float64
a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
a = self.timesteps.to(original_samples.device )
a = timesteps.to(original_samples.device )
a = [self.index_for_timestep(lowerCamelCase_ , lowerCamelCase_ ) for t in timesteps]
a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
a = sigma.unsqueeze(-1 )
a = original_samples + noise * sigma
return noisy_samples
def __len__(self ):
"""simple docstring"""
return self.config.num_train_timesteps
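# Toy check of the input scaling used by the scheduler above: the model input
# is the sample divided by sqrt(sigma**2 + 1). The values are arbitrary.
def _demo_scale_model_input():
    sigma = torch.tensor(2.0 )
    sample = torch.ones(2 , 2 )
    scaled = sample / ((sigma**2 + 1) ** 0.5 )
    assert torch.allclose(scaled , sample / 5 ** 0.5 )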
| 71 | 1 |
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCAmelCase :Union[str, Any] = get_tests_dir('''fixtures/dummy-config.json''')
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
__magic_name__ : Union[str, Any] = 0
def __lowerCAmelCase ( self : str ) -> int:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto' ) )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
__magic_name__ : List[str] = AutoConfig.from_pretrained('bert-base-uncased' )
self.assertIsInstance(lowercase_ , lowercase_ )
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
__magic_name__ : int = AutoConfig.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
__magic_name__ : int = AutoConfig.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
__magic_name__ : Optional[int] = AutoConfig.for_model('roberta' )
self.assertIsInstance(lowercase_ , lowercase_ )
def __lowerCAmelCase ( self : Tuple ) -> str:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
__magic_name__ : List[str] = os.path.join(lowercase_ , 'fake-roberta' )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
with open(os.path.join(lowercase_ , 'config.json' ) , 'w' ) as f:
f.write(json.dumps({} ) )
__magic_name__ : int = AutoConfig.from_pretrained(lowercase_ )
self.assertEqual(type(lowercase_ ) , lowercase_ )
    def test_new_config_registration(self):
        try:
            AutoConfig.register('custom' , CustomConfig )
            # Wrong model type will raise an error
            with self.assertRaises(ValueError ):
                AutoConfig.register('model' , CustomConfig )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoConfig.register('bert' , BertConfig )
            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir )
                new_config = AutoConfig.from_pretrained(tmp_dir )
                self.assertIsInstance(new_config , CustomConfig )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError , 'bert-base is not a local folder and is not a valid model identifier' ):
            _ = AutoConfig.from_pretrained('bert-base' )

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='aaaaaa' )

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError , 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.' , ):
            _ = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo' )

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=False )
        config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=True )
        self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir )
            reloaded_config = AutoConfig.from_pretrained(tmp_dir , trust_remote_code=True )
        self.assertEqual(reloaded_config.__class__.__name__ , 'NewModelConfig' )

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig ):
            model_type = 'new-model'

        try:
            AutoConfig.register('new-model' , NewModelConfigLocal )
            # If remote code is not set, the default is to use the local class.
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
            self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=False )
            self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
            # If remote code is enabled, we load from the Hub.
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=True )
            self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
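
# A minimal, self-contained illustration of the registration pattern the tests
# above exercise (CustomConfig mirrors the test fixture defined earlier in this
# file; treat the exact class body as an assumption, not library code):
#
#   from transformers import AutoConfig, PretrainedConfig
#
#   class CustomConfig(PretrainedConfig):
#       model_type = 'custom'
#
#   AutoConfig.register('custom', CustomConfig)
#   assert isinstance(AutoConfig.for_model('custom'), CustomConfig)
| 331 |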
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
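
# A rough sketch (illustrative, not the actual transformers implementation) of
# what the _LazyModule used above does: exported names are resolved on first
# attribute access, so heavy backends are imported only when actually needed.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        try:
            submodule_name = self._symbol_to_module[symbol]
        except KeyError:
            raise AttributeError(symbol) from None
        value = getattr(importlib.import_module('.' + submodule_name, self.__name__), symbol)
        setattr(self, symbol, value)  # cache so later lookups skip __getattr__
        return value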
| 199 | 0 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = 'cpu'  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule='linear', clip_sample=True)
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule='linear', clip_sample=True)
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # the last noisy batch and prediction must match between the two schedulers
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
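
# Why the DDPM and DDIM batches above coincide: both schedulers share the same
# forward (noising) process q(x_t | x_0), namely
#     x_t = sqrt(alphabar_t) * x_0 + sqrt(1 - alphabar_t) * eps,
# so add_noise depends only on the common alphas_cumprod table. A minimal
# sketch under that assumption (illustrative, not the library code):
def add_noise_sketch(clean_sample, noise, alphas_cumprod, timestep):
    alpha_bar = alphas_cumprod[timestep]
    return alpha_bar**0.5 * clean_sample + (1 - alpha_bar) ** 0.5 * noise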
| 369 |
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
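    """
    Apply Ohm's law (V = I * R): exactly one of the three arguments must be 0,
    and that quantity is solved for from the other two.

    Illustrative doctests (picked up by doctest.testmod() below):
    >>> ohms_law(voltage=10, current=5, resistance=0)
    {'resistance': 2.0}
    >>> ohms_law(voltage=0, current=2, resistance=3)
    {'voltage': 6.0}
    """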
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109 | 0 |
from ..utils import DummyObject, requires_backends
class FlaxControlNetModel(metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : int , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Dict ) -> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
requires_backends(cls , ['''flax'''] )
class FlaxModelMixin(metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : str ) -> Union[str, Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
requires_backends(cls , ['''flax'''] )
class FlaxUNetaDConditionModel(metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : int , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Any ) -> Any:
requires_backends(cls , ['''flax'''] )
class FlaxAutoencoderKL(metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : str , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class FlaxDiffusionPipeline(metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[str]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
class FlaxDDIMScheduler(metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Any ) -> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Dict ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
requires_backends(cls , ['''flax'''] )
class FlaxDDPMScheduler(metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : int ) -> Dict:
requires_backends(cls , ['''flax'''] )
class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Any:
requires_backends(cls , ['''flax'''] )
class FlaxKarrasVeScheduler(metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[str] ) -> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : int , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Any ) -> str:
requires_backends(cls , ['''flax'''] )
class FlaxLMSDiscreteScheduler(metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Dict ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class FlaxPNDMScheduler(metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : int , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Tuple:
requires_backends(cls , ['''flax'''] )
class FlaxSchedulerMixin(metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : int , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Any:
requires_backends(cls , ['''flax'''] )
class FlaxScoreSdeVeScheduler(metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> int:
requires_backends(cls , ['''flax'''] )
@classmethod
def _lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> int:
requires_backends(cls , ['''flax'''] )
| 30 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase ):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if it is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is already the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )

    def test_process_story_no_highlights(self):
        raw_story = '''It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this.'''
        _ , summary_lines = process_story(raw_story )
        self.assertEqual(summary_lines , [] )

    def test_process_empty_story(self):
        raw_story = ''
        story_lines , summary_lines = process_story(raw_story )
        self.assertEqual(story_lines , [] )
        self.assertEqual(summary_lines , [] )

    def test_process_story_with_missing_period(self):
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        story_lines , summary_lines = process_story(raw_story )
        expected_story_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(expected_story_lines , story_lines )
        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(expected_summary_lines , summary_lines )

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4] )
        expected = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(sequence , 0 ).numpy() , expected.numpy() )

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 23 ).numpy() , expected.numpy() )

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 1 ).numpy() , expected.numpy() )

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        result = compute_token_type_ids(batch , separator )
        np.testing.assert_array_equal(result , expected )
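
# A minimal sketch of the truncate_or_pad helper the tests above exercise; the
# behaviour is inferred from the tests, not copied from utils_summarization:
def truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))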
| 30 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
__UpperCAmelCase = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCamelCase ( snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Optional[int] ) -> Optional[Any]:
for attribute in key.split('.' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
UpperCamelCase : int = 'lm_head'
UpperCamelCase : Dict = getattr(lowercase_ , lowercase_ )
if weight_type is not None:
UpperCamelCase : Optional[Any] = getattr(lowercase_ , lowercase_ ).shape
else:
UpperCamelCase : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
UpperCamelCase : Optional[int] = value
elif weight_type == "weight_g":
UpperCamelCase : List[str] = value
elif weight_type == "weight_v":
UpperCamelCase : str = value
elif weight_type == "bias":
UpperCamelCase : Dict = value
else:
UpperCamelCase : List[Any] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ) -> List[Any]:
UpperCamelCase : Union[str, Any] = []
UpperCamelCase : Tuple = fairseq_model.state_dict()
UpperCamelCase : List[str] = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase : List[str] = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase : Any = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase : Union[str, Any] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCamelCase : Dict = True
if "*" in mapped_key:
UpperCamelCase : Union[str, Any] = name.split(lowercase_ )[0].split('.' )[-2]
UpperCamelCase : List[str] = mapped_key.replace('*' , lowercase_ )
if "weight_g" in name:
UpperCamelCase : List[Any] = 'weight_g'
elif "weight_v" in name:
UpperCamelCase : str = 'weight_v'
elif "bias" in name:
UpperCamelCase : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase : Optional[int] = 'weight'
else:
UpperCamelCase : Union[str, Any] = None
set_recursively(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def UpperCamelCase ( snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Dict ) -> List[str]:
UpperCamelCase : List[Any] = full_name.split('conv_layers.' )[-1]
UpperCamelCase : Any = name.split('.' )
UpperCamelCase : Union[str, Any] = int(items[0] )
UpperCamelCase : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
UpperCamelCase : List[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
UpperCamelCase : List[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
UpperCamelCase : int = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
UpperCamelCase : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase_ )
@torch.no_grad()
def UpperCamelCase ( snake_case__ : str , snake_case__ : Any , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : str=True ) -> List[Any]:
if config_path is not None:
UpperCamelCase : Union[str, Any] = UniSpeechConfig.from_pretrained(lowercase_ )
else:
UpperCamelCase : Dict = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCamelCase : str = Dictionary.load_from_json(lowercase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase : Optional[Any] = target_dict.pad_index
UpperCamelCase : Optional[Any] = target_dict.bos_index
UpperCamelCase : List[str] = target_dict.eos_index
UpperCamelCase : Tuple = len(target_dict.symbols )
UpperCamelCase : str = os.path.join(lowercase_ , 'vocab.json' )
if not os.path.isdir(lowercase_ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowercase_ ) )
return
os.makedirs(lowercase_ , exist_ok=lowercase_ )
UpperCamelCase : int = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase : Optional[int] = 42
UpperCamelCase : Optional[Any] = 43
with open(lowercase_ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowercase_ , lowercase_ )
UpperCamelCase : List[str] = WavaVecaPhonemeCTCTokenizer(
lowercase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowercase_ , )
UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowercase_ , return_attention_mask=lowercase_ , )
UpperCamelCase : Dict = WavaVecaProcessor(feature_extractor=lowercase_ , tokenizer=lowercase_ )
processor.save_pretrained(lowercase_ )
UpperCamelCase : Any = UniSpeechForCTC(lowercase_ )
else:
UpperCamelCase : Optional[int] = UniSpeechForPreTraining(lowercase_ )
if is_finetuned:
UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path} )
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
UpperCamelCase : Optional[int] = model[0].eval()
recursively_load_weights(lowercase_ , lowercase_ , lowercase_ )
hf_unispeech.save_pretrained(lowercase_ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__UpperCAmelCase = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
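
# The wildcard key resolution used inside recursively_load_weights above, in
# isolation (illustrative, simplified from the script): a '*' in the HF key is
# replaced with the layer index parsed out of the fairseq parameter name.
def resolve_wildcard_key(mapped_key: str, fairseq_name: str, matched_key: str) -> str:
    layer_index = fairseq_name.split(matched_key)[0].split('.')[-2]
    return mapped_key.replace('*', layer_index)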
| 357 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""")
        v_bias = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""")
        # next, set the packed qkv bias in the state dict (k gets zeros; see the sketch after this file)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
        state_dict[F"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def UpperCamelCase ( snake_case__ : int , snake_case__ : Dict=None , snake_case__ : int=False ) -> List[Any]:
UpperCamelCase : str = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
UpperCamelCase : int = tokenizer('\n' , add_special_tokens=snake_case__ ).input_ids[0]
UpperCamelCase , UpperCamelCase : Union[str, Any] = get_blipa_config(snake_case__ , eos_token_id=snake_case__ )
UpperCamelCase : Dict = BlipaForConditionalGeneration(snake_case__ ).eval()
UpperCamelCase : Optional[Any] = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
UpperCamelCase , UpperCamelCase : Optional[Any] = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
UpperCamelCase : List[str] = 'cuda' if torch.cuda.is_available() else 'cpu'
UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = load_model_and_preprocess(
name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ )
original_model.eval()
print('Done!' )
# update state dict keys
UpperCamelCase : List[Any] = original_model.state_dict()
UpperCamelCase : Tuple = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCamelCase : Optional[Any] = state_dict.pop(snake_case__ )
if key.startswith('Qformer.bert' ):
UpperCamelCase : List[str] = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
UpperCamelCase : Tuple = key.replace('self' , 'attention' )
if "opt_proj" in key:
UpperCamelCase : Union[str, Any] = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
UpperCamelCase : Optional[Any] = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
UpperCamelCase : Dict = key.replace('opt' , 'language' )
if key.startswith('t5' ):
UpperCamelCase : Dict = key.replace('t5' , 'language' )
UpperCamelCase : Optional[int] = val
# read in qv biases
read_in_q_v_bias(snake_case__ , snake_case__ )
UpperCamelCase , UpperCamelCase : Any = hf_model.load_state_dict(snake_case__ , strict=snake_case__ )
assert len(snake_case__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCamelCase : List[str] = load_demo_image()
UpperCamelCase : str = vis_processors['eval'](snake_case__ ).unsqueeze(0 ).to(snake_case__ )
UpperCamelCase : Any = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(snake_case__ )
# create processor
UpperCamelCase : Optional[Any] = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=snake_case__ , image_std=snake_case__ )
UpperCamelCase : Any = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
UpperCamelCase : Optional[int] = processor(images=snake_case__ , return_tensors='pt' ).pixel_values.to(snake_case__ )
# make sure processor creates exact same pixel values
assert torch.allclose(snake_case__ , snake_case__ )
original_model.to(snake_case__ )
hf_model.to(snake_case__ )
with torch.no_grad():
if "opt" in model_name:
UpperCamelCase : Tuple = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
UpperCamelCase : str = hf_model(snake_case__ , snake_case__ ).logits
else:
UpperCamelCase : Tuple = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
UpperCamelCase : List[Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
UpperCamelCase : Optional[int] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCamelCase : List[str] = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=snake_case__ )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCamelCase : Union[str, Any] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=snake_case__ )
else:
# cast to same type
UpperCamelCase : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
UpperCamelCase : Optional[int] = ''
UpperCamelCase : Union[str, Any] = tokenizer(snake_case__ , return_tensors='pt' ).input_ids.to(snake_case__ )
UpperCamelCase : str = original_model.generate({'image': original_pixel_values} )
UpperCamelCase : str = hf_model.generate(
snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , snake_case__ )
UpperCamelCase : Optional[int] = input_ids.shape[1]
UpperCamelCase : Union[str, Any] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ )
UpperCamelCase : Dict = [text.strip() for text in output_text]
print('HF generation:' , snake_case__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case__ )
hf_model.save_pretrained(snake_case__ )
if push_to_hub:
processor.push_to_hub(F"""nielsr/{model_name}""" )
hf_model.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
__UpperCAmelCase = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__UpperCAmelCase = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
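
# The q/k/v bias packing done in read_in_q_v_bias above, in isolation
# (illustrative): BLIP-2's ViT stores separate q and v biases and no k bias,
# while the packed qkv projection expects one concatenated bias vector, so the
# k slot is filled with zeros.
import torch

def pack_qkv_bias(q_bias: torch.Tensor, v_bias: torch.Tensor) -> torch.Tensor:
    k_bias = torch.zeros_like(q_bias, requires_grad=False)
    return torch.cat((q_bias, k_bias, v_bias))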
| 103 | 0 |
"""simple docstring"""
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = '''T5Config'''


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    '''Shift input ids one token to the right, inserting the decoder start token.'''
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    # replace any label padding (-100) with the real pad token id
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMTaModel(FlaxTaModel):
    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig
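
# Quick sanity check of shift_tokens_right above (illustrative values):
#   shift_tokens_right(jnp.array([[5, 6, 7]]), pad_token_id=0, decoder_start_token_id=2)
#   -> [[2, 5, 6]]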
| 220 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model) :
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module ):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self , module , rank ):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
            small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
            nn.init.normal_(self.adapter[0].weight , std=small_std )
            nn.init.zeros_(self.adapter[1].weight )
            self.adapter.to(module.weight.device )

        def forward(self , input , *args , **kwargs ):
            return self.module(input , *args , **kwargs ) + self.adapter(input )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCamelCase : Tuple = "bigscience/bloom-1b7"
# Constant values
lowerCamelCase : List[Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
lowerCamelCase : int = "Hello my name is"
lowerCamelCase : Tuple = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCamelCase : Optional[int] = 10
    def setUp(self ):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name )
class __lowercase ( _lowercase ):
    def setUp(self ):
        super().setUp()
        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_abit.config
self.assertTrue(hasattr(A , '''quantization_config''' ) )
lowerCamelCase_ : Tuple = config.to_dict()
lowerCamelCase_ : Optional[Any] = config.to_diff_dict()
lowerCamelCase_ : Any = config.to_json_string()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
lowerCamelCase_ : str = self.model_fpaa.get_memory_footprint()
lowerCamelCase_ : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCamelCase_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase__ (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = BitsAndBytesConfig()
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = BitsAndBytesConfig()
with self.assertRaises(A ):
lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , load_in_abit=A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : List[Any] = self.model_fpaa.to(torch.floataa )
lowerCamelCase_ : Tuple = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
lowerCamelCase_ : str = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCamelCase_ : List[Any] = self.model_fpaa.half()
# Check this does not throw an error
lowerCamelCase_ : List[str] = self.model_fpaa.float()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : List[Any] = '''t5-small'''
lowerCamelCase_ : Optional[Any] = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
lowerCamelCase_ : Optional[Any] = '''Translate in German: Hello, my dog is cute'''
def UpperCAmelCase__ (self ):
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from transformers import TaForConditionalGeneration
lowerCamelCase_ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCamelCase_ : List[Any] = None
# test with `t5-small`
lowerCamelCase_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[int] = model.generate(**A )
lowerCamelCase_ : Any = modules
def UpperCAmelCase__ (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCamelCase_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Dict = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Tuple = model.generate(**A )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# model_name
lowerCamelCase_ : Optional[int] = '''bigscience/bloom-560m'''
lowerCamelCase_ : Optional[int] = '''t5-small'''
# Different types of model
lowerCamelCase_ : List[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Sequence classification model
lowerCamelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A , device_map='''auto''' )
# CausalLM model
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Seq2seq model
lowerCamelCase_ : int = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCamelCase_ : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCamelCase_ : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''facebook/opt-350m'''
super().setUp()
def UpperCAmelCase__ (self ):
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCamelCase_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCamelCase_ : List[str] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCamelCase_ : Optional[int] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A ) ):
lowerCamelCase_ : Dict = LoRALayer(module.q_proj , rank=1_6 )
lowerCamelCase_ : str = LoRALayer(module.k_proj , rank=1_6 )
lowerCamelCase_ : int = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
lowerCamelCase_ : Union[str, Any] = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCamelCase_ : Optional[int] = model.forward(**A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A , A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = "gpt2-xl"
lowerCamelCase : int = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
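
# A minimal sketch of the 4-bit loading pattern these tests exercise (model
# name illustrative; requires a CUDA GPU plus the bitsandbytes package):
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   quantization_config = BitsAndBytesConfig(load_in_4bit=True)
#   model = AutoModelForCausalLM.from_pretrained(
#       'bigscience/bloom-1b7', quantization_config=quantization_config, device_map='auto'
#   )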
| 318 | 0 |
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float] ) -> list[float]:
    """Brute force: for each element, scan the rest of the list (O(n^2))."""
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_item: float = -1
        for j in range(i + 1 , arr_size ):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item )
    return result
def next_greatest_element_fast(arr: list[float] ) -> list[float]:
    """Like the slow version, but using enumerate and slicing (still O(n^2))."""
    result = []
    for i, outer in enumerate(arr ):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item )
    return result
def next_greatest_element(arr: list[float] ) -> list[float]:
    """Monotonic stack solution: each element is pushed and popped at most once."""
    arr_size = len(arr )
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
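
# Design note: the stack version runs in O(n) amortized time because every
# element enters and leaves the stack at most once, which is what the timeit
# comparison in the __main__ block below makes visible against the two O(n^2)
# variants above.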
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__lowerCamelCase : List[Any] = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 350 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MTaIntegrationTest(unittest.TestCase ):
    @slow
    def test_small_integration_test(self ):
        """
        The ported checkpoint must reproduce the reference score computed with
        the original Mesh TensorFlow implementation to within 1e-4.
        """
        model = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""" )
        input_ids = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
        labels = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
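
# Note on the score above: the model loss is the mean cross-entropy per label
# token, so multiplying by labels.shape[-1] and negating converts it back into
# a total sequence log-likelihood, the quantity the reference Mesh TensorFlow
# implementation reports.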
| 204 | 0 |
from math import pi
def arc_length(angle: int , radius: int ) -> float:
    '''Length of a circular arc: the fraction angle/360 of the circumference.'''
    return 2 * pi * radius * (angle / 3_60)
if __name__ == "__main__":
print(arc_length(90, 10))
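
# Worked example for arc_length above: a 90 degree arc of a circle of radius 10
# covers a quarter of the circumference, 2 * pi * 10 / 4 ~= 15.71, which is the
# value the print above outputs.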
| 140 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable ):
    '''Hash table whose buckets are deques, giving separate chaining.'''

    def __init__(self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )

    def _set_value(self , key , data ):
        """simple docstring"""
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]

    def balanced_factor(self ):
        """simple docstring"""
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self , key , data=None ):
        """simple docstring"""
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data )
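
# Illustrative usage, assuming the HashTable base class in this repository
# takes (size_table, charge_factor) and exposes insert_data (both assumptions
# about code outside this file):
#   table = HashTableWithLinkedList(5, charge_factor=2)
#   table.insert_data(17)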
| 140 | 1 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 33 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
A : str = logging.get_logger(__name__)
A : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights into the transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 33 | 1 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length])
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
A__ = ids_tensor([self.batch_size] , self.num_choices)
A__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[Any]:
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any]) ->int:
'''simple docstring'''
A__ = LlamaModel(config=lowercase_)
model.to(lowercase_)
model.eval()
A__ = model(lowercase_ , attention_mask=lowercase_)
A__ = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , ) ->Dict:
'''simple docstring'''
A__ = True
A__ = LlamaModel(lowercase_)
model.to(lowercase_)
model.eval()
A__ = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
A__ = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
A__ = model(lowercase_ , attention_mask=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , ) ->List[str]:
'''simple docstring'''
A__ = LlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
A__ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , ) ->List[str]:
'''simple docstring'''
A__ = True
A__ = True
A__ = LlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
# first forward pass
A__ = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
A__ = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
A__ = ids_tensor((self.batch_size, 3) , config.vocab_size)
A__ = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
A__ = torch.cat([input_ids, next_tokens] , dim=-1)
A__ = torch.cat([input_mask, next_mask] , dim=-1)
A__ = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['''hidden_states'''][0]
A__ = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['''hidden_states'''][0]
# select random slice
A__ = ids_tensor((1,) , output_from_past.shape[-1]).item()
A__ = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3))
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) = config_and_inputs
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ = type
self.model_tester.create_and_check_model(*lowercase_)
def SCREAMING_SNAKE_CASE ( self : str) ->List[str]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = 3
A__ = input_dict['''input_ids''']
A__ = input_ids.ne(1).to(lowercase_)
A__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
A__ = LlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
A__ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = 3
A__ = '''single_label_classification'''
A__ = input_dict['''input_ids''']
A__ = input_ids.ne(1).to(lowercase_)
A__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
A__ = LlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
A__ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = 3
A__ = '''multi_label_classification'''
A__ = input_dict['''input_ids''']
A__ = input_ids.ne(1).to(lowercase_)
A__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
A__ = LlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
A__ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''')
def SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]:
'''simple docstring'''
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)])
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Optional[int]) ->int:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = ids_tensor([1, 10] , config.vocab_size)
A__ = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(42) # Fixed seed at init time so the two models get the same random weights
A__ = LlamaModel(lowercase_)
original_model.to(lowercase_)
original_model.eval()
A__ = original_model(lowercase_).last_hidden_state
A__ = original_model(lowercase_).last_hidden_state
set_seed(42) # Fixed seed at init time so the two models get the same random weights
A__ = {'''type''': scaling_type, '''factor''': 10.0}
A__ = LlamaModel(lowercase_)
scaled_model.to(lowercase_)
scaled_model.eval()
A__ = scaled_model(lowercase_).last_hidden_state
A__ = scaled_model(lowercase_).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
@slow
def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]:
'''simple docstring'''
A__ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
A__ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''')
A__ = model(torch.tensor([input_ids]))
# Expected mean on dim = -1
A__ = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
torch.testing.assert_close(out.mean(-1) , lowercase_ , atol=1e-2 , rtol=1e-2)
# slicing logits[0, 0, 0:30]
# fmt: off
A__ = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1e-5 , rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
'''simple docstring'''
A__ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
A__ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''')
A__ = model(torch.tensor(lowercase_))
# Expected mean on dim = -1
A__ = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
torch.testing.assert_close(out.mean(-1) , lowercase_ , atol=1e-2 , rtol=1e-2)
# slicing logits[0, 0, 0:30]
# fmt: off
A__ = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1e-5 , rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
@slow
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
A__ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
A__ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''')
A__ = model(torch.tensor(lowercase_))
# Expected mean on dim = -1
A__ = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
torch.testing.assert_close(out.mean(-1) , lowercase_ , atol=1e-2 , rtol=1e-2)
# slicing logits[0, 0, 0:30]
# fmt: off
A__ = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1e-5 , rtol=1e-5)
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test")
@slow
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
A__ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
A__ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''')
A__ = model(torch.tensor(lowercase_))
A__ = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa)
torch.testing.assert_close(out.mean(-1) , lowercase_ , atol=1e-2 , rtol=1e-2)
# fmt: off
A__ = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1e-5 , rtol=1e-5)
    @unittest.skip("Model is currently gated")
@slow
def SCREAMING_SNAKE_CASE ( self : Any) ->int:
'''simple docstring'''
A__ = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
A__ = '''Simply put, the theory of relativity states that '''
A__ = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''')
A__ = tokenizer.encode(lowercase_ , return_tensors='''pt''')
A__ = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=lowercase_)
# greedy generation outputs
A__ = model.generate(lowercase_ , max_new_tokens=64 , top_p=lowercase_ , temperature=1 , do_sample=lowercase_)
A__ = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowercase_)
self.assertEqual(lowercase_ , lowercase_)
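# A module-level sketch (not part of the original test) of the RoPE-scaling
# config shape exercised by the parameterized scaling test above; the
# `rope_scaling` dict is assumed to accept {"type": "linear" | "dynamic",
# "factor": float > 1.0}.
def _rope_scaling_sketch():
    config = LlamaConfig(
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        rope_scaling={"type": "linear", "factor": 10.0},
    )
    return LlamaModel(config)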
| 14 |
"""simple docstring"""
class Node:
    def __init__(self, data: int, previous=None, next_node=None) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head) -> None:
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.previous = None
        node.next = None

    def is_empty(self) -> bool:
        return self.head is None


def create_linked_list() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
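# Usage sketch for the classes above (illustrative; defined but never invoked):
def _linked_list_demo() -> None:
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)       # appends at the tail
    assert 2 in linked_list             # __contains__ walks the nodes
    assert str(linked_list) == "1 2 3"  # __str__ joins node data
    linked_list.delete_value(2)
    assert str(linked_list) == "1 3"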
| 78 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
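# Illustrative construction (a sketch, not from the original file): the
# attribute_map above lets the standard config names resolve to the GPT-style
# fields.
#
#   config = TrajectoryTransformerConfig(n_embd=128, n_head=4, n_layer=4)
#   assert config.hidden_size == 128  # alias for n_embd via attribute_map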
| 350 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TOKENIZERS_PARALLELISM"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
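# Why gather_for_metrics matters here (illustrative numbers, not from the
# original file): with 2 processes and 82 samples, a plain padded gather would
# yield 84 predictions; gather_for_metrics drops the duplicated tail so
# len(logits) == 82, which is exactly what test_torch_metrics asserts above.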
| 38 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
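# Effect of the _LazyModule indirection above (illustrative): imports from this
# package stay cheap until a backend-specific symbol is actually touched.
#
#   from transformers import TapasConfig   # no torch import yet
#   from transformers import TapasModel    # first access triggers the torch branch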
| 70 |
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        # Collect every method tagged by @mark / @mark_multiple into the
        # class-level dispatch table.
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Creates a new class with the KeyHandler metaclass applied."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 58 | 0 |
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Sum of the first n terms of an arithmetic progression:
    S_n = n/2 * (2*a_1 + (n - 1)*d)
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main():
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
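# Quick check of the closed form above (illustrative): 1 + 2 + ... + 10 = 55.
assert sum_of_series(1, 1, 10) == 55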
| 357 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={
"help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
} , )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCAmelCase : Optional[str] = field(default=UpperCAmelCase__ , metadata={"help": "A folder containing the training data."} )
lowerCAmelCase : Optional[str] = field(default=UpperCAmelCase__ , metadata={"help": "A folder containing the validation data."} )
lowerCAmelCase : Optional[float] = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
lowerCAmelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCAmelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
    def __post_init__(self):
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : str = field(
default="google/vit-base-patch16-224-in21k" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
lowerCAmelCase : Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
lowerCAmelCase : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCAmelCase : str = field(default=UpperCAmelCase__ , metadata={"help": "Name or path of preprocessor config."} )
lowerCAmelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowerCAmelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def __lowerCAmelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , __lowerCAmelCase , __lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCAmelCase : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(__lowerCAmelCase )
transformers.utils.logging.set_verbosity(__lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_UpperCAmelCase : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
_UpperCAmelCase : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
else:
_UpperCAmelCase : List[Any] = {}
if data_args.train_dir is not None:
_UpperCAmelCase : str = os.path.join(data_args.train_dir , "**" )
if data_args.validation_dir is not None:
_UpperCAmelCase : Optional[Any] = os.path.join(data_args.validation_dir , "**" )
_UpperCAmelCase : Any = load_dataset(
"imagefolder" , data_files=__lowerCAmelCase , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
_UpperCAmelCase : int = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __lowerCAmelCase ) and data_args.train_val_split > 0.0:
_UpperCAmelCase : List[Any] = dataset["train"].train_test_split(data_args.train_val_split )
_UpperCAmelCase : List[str] = split["train"]
_UpperCAmelCase : Union[str, Any] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_UpperCAmelCase : Optional[int] = dataset["train"].features["labels"].names
_UpperCAmelCase , _UpperCAmelCase : int = {}, {}
for i, label in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : int = str(__lowerCAmelCase )
_UpperCAmelCase : str = label
# Load the accuracy metric from the datasets package
_UpperCAmelCase : int = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
_UpperCAmelCase : Dict = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowerCAmelCase ) , labelaid=__lowerCAmelCase , idalabel=__lowerCAmelCase , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase : List[str] = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
_UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
_UpperCAmelCase : int = image_processor.size["shortest_edge"]
else:
_UpperCAmelCase : int = (image_processor.size["height"], image_processor.size["width"])
_UpperCAmelCase : str = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
_UpperCAmelCase : Optional[int] = Compose(
[
RandomResizedCrop(__lowerCAmelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
_UpperCAmelCase : Union[str, Any] = Compose(
[
Resize(__lowerCAmelCase ),
CenterCrop(__lowerCAmelCase ),
ToTensor(),
normalize,
] )
def train_transforms(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
_UpperCAmelCase : Dict = (
dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(__lowerCAmelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
_UpperCAmelCase : Optional[Any] = (
dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(__lowerCAmelCase )
    # Initialize our trainer
_UpperCAmelCase : Union[str, Any] = Trainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , data_collator=__lowerCAmelCase , )
# Training
if training_args.do_train:
_UpperCAmelCase : Any = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase : List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase : int = last_checkpoint
_UpperCAmelCase : Dict = trainer.train(resume_from_checkpoint=__lowerCAmelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_UpperCAmelCase : Dict = trainer.evaluate()
trainer.log_metrics("eval" , __lowerCAmelCase )
trainer.save_metrics("eval" , __lowerCAmelCase )
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
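# Illustrative invocation of the fine-tuning script above (dataset name and
# paths are placeholders, not from the original document):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./vit-base-beans \
#       --do_train --do_eval \
#       --overwrite_output_dir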
| 322 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
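# Illustrative usage of the helpers exercised above (a sketch; requires network
# access to the Hugging Face Hub, and the dataset name is only an example):
#
#   from datasets import get_dataset_config_names, get_dataset_split_names
#   configs = get_dataset_config_names("paws")  # ["labeled_final", "labeled_swap", "unlabeled_final"]
#   splits = get_dataset_split_names("paws", config_name="labeled_final")  # ["train", "test", "validation"]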
| 73 |
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1_000) -> int:
    """Return the denominator d (numerator <= d <= digit) whose unit fraction
    has the longest recurring decimal cycle.

    >>> solution(1, 10)
    7
    """
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
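    # Usage sketch: the full default search (d up to 1000) finds the denominator
    # with the longest recurring cycle; left commented out since it is slow.
    # print(solution())  # Project Euler 26 answer: 983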
| 319 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(11)) = }''')
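    # Worked example (a sketch): the first four truncatable primes found by the
    # search above are 23, 37, 53 and 73.
    print(compute_truncated_primes(4))  # -> [23, 37, 53, 73]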
| 356 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
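    # Worked example (a sketch): X_L = 2*pi*f*L, so a 10 mH inductor at 1 kHz
    # has X_L = 2*pi*1000*0.010, roughly 62.83 ohms.
    print(ind_reactance(10e-3, 1000, 0))  # -> {'reactance': 62.83185307179586}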
| 108 | 0 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply `single_map_nested_func` over `iterable` in parallel, honoring the active backend."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))
    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )
    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib
    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Context manager that routes parallel map calls through a joblib backend."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark
        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
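# A self-contained check of the contiguous-split arithmetic used in
# _map_with_multiprocessing_pool above (the helper name is illustrative):
def _contiguous_splits(n_items: int, num_proc: int) -> list:
    div, mod = divmod(n_items, num_proc)
    splits = []
    for index in range(num_proc):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        splits.append(range(start, end))
    return splits
assert [len(s) for s in _contiguous_splits(10, 3)] == [4, 3, 3]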
| 121 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subparsers = config_parser.add_subparsers(title="subcommands", dest="subcommand")
    # Then add other parsers with the parent parser
    default_command_parser(subparsers, parents=[parent_parser])
    update_command_parser(subparsers, parents=[parent_parser])
    return config_parser
def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
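# A self-contained sketch of the subparser pattern used above ("greet" is an
# invented subcommand; accelerate's real subcommands are config, default, update):
def _demo_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(prog="demo")
    subparsers = parser.add_subparsers(title="subcommands", dest="subcommand")
    greet = subparsers.add_parser("greet")
    greet.add_argument("name")
    greet.set_defaults(func=lambda args: print(f"hello {args.name}"))
    return parser
# args = _demo_parser().parse_args(["greet", "world"]); args.func(args)  # -> "hello world"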
| 121 | 1 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'; torch/{_torch_version}'
if is_flax_available():
ua += f'; jax/{_jax_version}'
ua += f'; flax/{_flax_version}'
if is_onnx_available():
ua += f'; onnxruntime/{_onnxruntime_version}'
# CI will set this value to True
if os.environ.get("""DIFFUSERS_IS_CI""" , """""" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f'{username}/{model_id}'
    else:
        return f'{organization}/{model_id}'
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            """Modelcard rendering is based on Jinja templates."""
            """ Please make sure to have `jinja` installed before using `create_model_card`."""
            """ To install it, please run `pip install Jinja2`.""" )
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(R"""snapshots/([^/]+)/""", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("""**/blobs/*"""):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    """Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""" )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
with open(cache_version_file) as f:
try:
            cache_version = int(f.read())
except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(""".""")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = """.""".join(splits)
    return weights_name
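# Worked example (a sketch) of the variant naming above: the variant label is
# inserted just before the file extension.
assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"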
def lowerCamelCase__ ( A__ : str , *,
A__ : Optional[int] , A__ : Any , A__ : Optional[int] , A__ : Optional[Any] , A__ : Tuple , A__ : Dict , A__ : str , A__ : List[str] , A__ : Union[str, Any] , A__ : Dict , A__ : Union[str, Any]=None , ):
__lowerCamelCase = str(__UpperCAmelCase )
if os.path.isfile(__UpperCAmelCase ):
return pretrained_model_name_or_path
elif os.path.isdir(__UpperCAmelCase ):
if os.path.isfile(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) ):
# Load from a PyTorch checkpoint
__lowerCamelCase = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) ):
__lowerCamelCase = os.path.join(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return model_file
else:
raise EnvironmentError(
f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__UpperCAmelCase ).base_version ) >= version.parse("""0.20.0""" )
):
try:
__lowerCamelCase = hf_hub_download(
__UpperCAmelCase , filename=_add_variant(__UpperCAmelCase , __UpperCAmelCase ) , cache_dir=__UpperCAmelCase , force_download=__UpperCAmelCase , proxies=__UpperCAmelCase , resume_download=__UpperCAmelCase , local_files_only=__UpperCAmelCase , use_auth_token=__UpperCAmelCase , user_agent=__UpperCAmelCase , subfolder=__UpperCAmelCase , revision=revision or commit_hash , )
warnings.warn(
f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , __UpperCAmelCase , )
return model_file
except: # noqa: E722
warnings.warn(
f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__UpperCAmelCase , __UpperCAmelCase )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(__UpperCAmelCase , __UpperCAmelCase )}\' so that the correct variant file can be added.' , __UpperCAmelCase , )
try:
# 2. Load model file as usual
__lowerCamelCase = hf_hub_download(
__UpperCAmelCase , filename=__UpperCAmelCase , cache_dir=__UpperCAmelCase , force_download=__UpperCAmelCase , proxies=__UpperCAmelCase , resume_download=__UpperCAmelCase , local_files_only=__UpperCAmelCase , use_auth_token=__UpperCAmelCase , user_agent=__UpperCAmelCase , subfolder=__UpperCAmelCase , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
"""listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
"""this model name. Check the model page at """
f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
f' directory containing a file named {weights_name} or'
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.""" )
except EnvironmentError:
raise EnvironmentError(
f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
"""\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. """
f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
f'containing a file named {weights_name}' )
| 355 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    @property
    def default_onnx_opset(self) -> int:
        return 12
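# Illustrative usage (a sketch; mirrors how transformers consumers would use the
# classes above):
#
#   config = YolosConfig()
#   assert config.num_detection_tokens == 100
#   onnx_config = YolosOnnxConfig(config)
#   assert list(onnx_config.inputs) == ["pixel_values"]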
| 29 | 0 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """Wraps an EncodecFeatureExtractor and a T5 tokenizer into a single processor."""
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs['input_values'] = audio_inputs['input_values']
            if "padding_mask" in audio_inputs:
                inputs['padding_mask'] = audio_inputs['padding_mask']
            return inputs
    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop('audio', None)
        padding_mask = kwargs.pop('padding_mask', None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), 'constant', constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
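# A self-contained sketch of the padding-mask trick used in _decode_audio above:
# pad the mask with the *non*-padding value so samples generated beyond the mask
# length are kept, then boolean-index each batch item (illustrative shapes only).
_padding_value = 0.0
_audio = np.arange(12, dtype=np.float32).reshape(2, 1, 6)  # (batch, channels, seq_len)
_mask = np.array([[1, 1, 1, 1], [1, 1, 0, 0]])  # 1 = real sample, 0 = padding
_diff = _audio.shape[-1] - _mask.shape[-1]
_mask = np.pad(_mask, ((0, 0), (0, _diff)), "constant", constant_values=1 - _padding_value)
_trimmed = [a[m[None, :] != _padding_value].reshape(1, -1) for a, m in zip(_audio, _mask)]
assert [t.shape[-1] for t in _trimmed] == [6, 4]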
| 113 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name: str) -> ASTConfig:
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError('Model not supported')
    repo_id = 'huggingface/label-files'
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = 'speech-commands-v2-id2label.json'
    else:
        config.num_labels = 527
        filename = 'audioset-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name: str) -> str:
    if "module.v" in name:
        name = name.replace('module.v', 'audio_spectrogram_transformer')
    if "cls_token" in name:
        name = name.replace('cls_token', 'embeddings.cls_token')
    if "dist_token" in name:
        name = name.replace('dist_token', 'embeddings.distillation_token')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    # transformer blocks
    if "blocks" in name:
        name = name.replace('blocks', 'encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('audio_spectrogram_transformer.norm', 'audio_spectrogram_transformer.layernorm')
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('module.mlp_head.0', 'classifier.layernorm')
    if "module.mlp_head.1" in name:
        name = name.replace('module.mlp_head.1', 'classifier.dense')
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias'
                ] = val[:dim]
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias'
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias'
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        'module.v.head.weight',
        'module.v.head.bias',
        'module.v.head_dist.weight',
        'module.v.head_dist.bias',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
SCREAMING_SNAKE_CASE = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
# load original state_dict
SCREAMING_SNAKE_CASE = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location='cpu' )
# remove some keys
remove_keys(SCREAMING_SNAKE_CASE_ )
# rename some keys
SCREAMING_SNAKE_CASE = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load 🤗 model
SCREAMING_SNAKE_CASE = ASTForAudioClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
SCREAMING_SNAKE_CASE = -4.2_67_73_93 if 'speech-commands' not in model_name else -6.84_59_78
SCREAMING_SNAKE_CASE = 4.5_68_99_74 if 'speech-commands' not in model_name else 5.5_65_45_26
SCREAMING_SNAKE_CASE = 10_24 if 'speech-commands' not in model_name else 1_28
SCREAMING_SNAKE_CASE = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
if "speech-commands" in model_name:
SCREAMING_SNAKE_CASE = load_dataset('speech_commands' , 'v0.02' , split='validation' )
SCREAMING_SNAKE_CASE = dataset[0]['audio']['array']
else:
SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torchaudio.load(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = waveform.squeeze().numpy()
SCREAMING_SNAKE_CASE = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=1_60_00 , return_tensors='pt' )
# forward pass
SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
SCREAMING_SNAKE_CASE = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
SCREAMING_SNAKE_CASE = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
SCREAMING_SNAKE_CASE = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
SCREAMING_SNAKE_CASE = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
SCREAMING_SNAKE_CASE = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
SCREAMING_SNAKE_CASE = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
SCREAMING_SNAKE_CASE = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
SCREAMING_SNAKE_CASE = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(F'MIT/{model_name}' )
feature_extractor.push_to_hub(F'MIT/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__UpperCamelCase = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
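# A self-contained sketch of the fused-QKV split performed in convert_state_dict
# above: the original checkpoint stores one (3*dim, dim) qkv projection, while
# the HF model expects separate query/key/value tensors (illustrative dim only).
_dim = 4
_qkv_weight = torch.randn(3 * _dim, _dim)
_query, _key, _value = _qkv_weight[:_dim, :], _qkv_weight[_dim : _dim * 2, :], _qkv_weight[-_dim:, :]
assert torch.equal(torch.cat([_query, _key, _value]), _qkv_weight)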
| 113 | 1 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase : Optional[int] = 1_6
lowercase : Optional[int] = 3_2
def bamb(x):
    # bytes -> megabytes (2**20)
    return int(x / 2**20)
class TorchTracemalloc:
    """Context manager that tracks CUDA memory allocated (used) and peaked inside its block."""
def __enter__( self) -> Dict:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
return self
def __exit__( self , *lowercase) -> List[Any]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        'glue', 'mrpc', split={'train': f'train[:{n_train}]', 'validation': f'validation[:{n_val}]'} )
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function(config, args):
# Initialize accelerator
a__ : Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a__ : Union[str, Any] = config['lr']
a__ : List[Any] = int(config['num_epochs'] )
a__ : Union[str, Any] = int(config['seed'] )
a__ : Tuple = int(config['batch_size'] )
a__ : int = args.model_name_or_path
set_seed(A__ )
a__ , a__ : Any = get_dataloaders(A__ , A__ , A__ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
a__ : Optional[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
a__ : Any = optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
a__ : str = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
a__ : str = 1
a__ : Optional[int] = (len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
a__ : List[Any] = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
a__ : Any = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a__ , a__ , a__ , a__ , a__ : Optional[int] = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
a__ : Optional[Any] = 0
# We also need to keep track of the stating epoch so files are named properly
a__ : Union[str, Any] = 0
# Now we train the model
a__ : List[str] = {}
for epoch in range(A__ , A__ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(A__ ):
a__ : Optional[Any] = model(**A__ )
a__ : List[Any] = outputs.loss
a__ : Union[str, Any] = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
a__ : List[str] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F'epoch-{epoch}'] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(A__ , A__ )
def main():
a__ : Union[str, Any] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=A__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=A__ , )
parser.add_argument(
'--output_dir' , type=A__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=A__ , default=A__ , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=A__ , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=A__ , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=A__ , default=1 , help='Number of train epochs.' , )
a__ : Dict = parser.parse_args()
a__ : List[str] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
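# Usage sketch for the TorchTracemalloc context manager defined above (requires
# a CUDA device; `model` and `batch` are assumed placeholders):
#
#   with TorchTracemalloc() as tracemalloc:
#       loss = model(**batch).loss
#       loss.backward()
#   print(tracemalloc.used, tracemalloc.peaked)  # MB consumed / MB peak in the block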
| 225 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
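# A minimal sketch of the lazy-module idea behind _LazyModule (PEP 562 module
# __getattr__; the structure below is illustrative, not transformers' exact code):
#
#   _import_structure = {"modeling": ["MyModel"]}
#
#   def __getattr__(name):
#       import importlib
#       for submodule, names in _import_structure.items():
#           if name in names:
#               return getattr(importlib.import_module(f".{submodule}", __name__), name)
#       raise AttributeError(name)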
| 225 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = 'trajectory_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 91 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
A: Optional[Any] = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
A: Optional[int] = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
A: int = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute(self, predictions, references) -> dict:
        '''simple docstring'''
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 109 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_lowercase : List[Any] ="UperNetConfig"
class UperNetConvModule(nn.Module):
    """A conv block that bundles conv/norm/activation layers."""
    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> None:
"""simple docstring"""
super().__init__()
a__ : Optional[Any] = pool_scales
a__ : int = align_corners
a__ : List[Any] = in_channels
a__ : Dict = channels
a__ : Optional[int] = []
for i, pool_scale in enumerate(__lowercase ):
a__ : int = UperNetPyramidPoolingBlock(pool_scale=__lowercase , in_channels=__lowercase , channels=__lowercase )
self.blocks.append(__lowercase )
self.add_module(str(__lowercase ) , __lowercase )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> List[torch.Tensor]:
"""simple docstring"""
a__ : Optional[Any] = []
for ppm in self.blocks:
a__ : List[str] = ppm(__lowercase )
a__ : Any = nn.functional.interpolate(
__lowercase , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners )
ppm_outs.append(__lowercase )
return ppm_outs
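# Sketch of the pyramid-pooling idea implemented above (assumption: PSPNet-style
# pool_scales such as (1, 2, 3, 6)): pool the feature map down to several grid sizes,
# then bilinearly upsample each result back to the input resolution before fusing.
#
#     import torch
#     from torch import nn
#
#     feat = torch.randn(1, 8, 32, 32)
#     for scale in (1, 2, 3, 6):
#         pooled = nn.AdaptiveAvgPool2d(scale)(feat)        # (1, 8, scale, scale)
#         upsampled = nn.functional.interpolate(
#             pooled, size=feat.shape[2:], mode="bilinear", align_corners=False
#         )                                                  # back to (1, 8, 32, 32)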
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase ) -> Optional[int]:
"""simple docstring"""
super().__init__()
a__ : Dict = config
a__ : List[Any] = config.pool_scales # e.g. (1, 2, 3, 6)
a__ : Any = in_channels
a__ : Tuple = config.hidden_size
a__ : Union[str, Any] = False
a__ : int = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
a__ : Tuple = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
a__ : str = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
a__ : Any = nn.ModuleList()
a__ : Dict = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
a__ : Any = UperNetConvModule(__lowercase , self.channels , kernel_size=1 )
a__ : Optional[int] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(__lowercase )
self.fpn_convs.append(__lowercase )
a__ : Optional[Any] = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
self.apply(self._init_weights )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(__lowercase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Any:
"""simple docstring"""
a__ : Optional[Any] = inputs[-1]
a__ : Any = [x]
psp_outs.extend(self.psp_modules(__lowercase ) )
a__ : str = torch.cat(__lowercase , dim=1 )
a__ : Optional[Any] = self.bottleneck(__lowercase )
return output
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> torch.Tensor:
"""simple docstring"""
a__ : int = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(__lowercase ) )
# build top-down path
a__ : List[str] = len(__lowercase )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
a__ : str = laterals[i - 1].shape[2:]
a__ : Optional[int] = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=__lowercase , mode="""bilinear""" , align_corners=self.align_corners )
# build outputs
a__ : Optional[Any] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
a__ : Optional[Any] = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners )
a__ : Any = torch.cat(__lowercase , dim=1 )
a__ : Optional[int] = self.fpn_bottleneck(__lowercase )
a__ : Optional[Any] = self.classifier(__lowercase )
return output
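# Sketch of the top-down FPN fusion in the forward pass above (assumption: `laterals`
# runs fine-to-coarse): each coarser map is upsampled and added into the next finer one.
#
#     import torch
#     from torch.nn import functional as F
#
#     laterals = [torch.randn(1, 4, s, s) for s in (32, 16, 8, 4)]   # fine -> coarse
#     for i in range(len(laterals) - 1, 0, -1):
#         laterals[i - 1] = laterals[i - 1] + F.interpolate(
#             laterals[i], size=laterals[i - 1].shape[2:], mode="bilinear", align_corners=False
#         )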
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase = 2 , __lowercase = 3 , __lowercase = 1 ) -> None:
"""simple docstring"""
super().__init__()
a__ : Union[str, Any] = config
a__ : Union[str, Any] = config.auxiliary_in_channels
a__ : Tuple = config.auxiliary_channels
a__ : str = config.auxiliary_num_convs
a__ : Tuple = config.auxiliary_concat_input
a__ : str = in_index
a__ : Tuple = (kernel_size // 2) * dilation
a__ : List[str] = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=__lowercase , padding=__lowercase , dilation=__lowercase ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=__lowercase , padding=__lowercase , dilation=__lowercase ) )
if self.num_convs == 0:
a__ : int = nn.Identity()
else:
a__ : int = nn.Sequential(*__lowercase )
if self.concat_input:
a__ : Optional[int] = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=__lowercase , padding=kernel_size // 2 )
a__ : Union[str, Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
self.apply(self._init_weights )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Optional[int]:
"""simple docstring"""
if isinstance(__lowercase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> torch.Tensor:
"""simple docstring"""
a__ : str = encoder_hidden_states[self.in_index]
a__ : List[Any] = self.convs(__lowercase )
if self.concat_input:
a__ : Tuple = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
a__ : List[Any] = self.classifier(__lowercase )
return output
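# Note on the padding used by this head (assumption: "same"-style output size is intended):
# padding = (kernel_size // 2) * dilation keeps the spatial size of a stride-1 dilated conv
# unchanged for odd kernels, e.g. kernel_size=3, dilation=2 -> padding=2, so 32x32 stays 32x32.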
class snake_case__ (A__ ):
"""simple docstring"""
__lowerCAmelCase :Union[str, Any] = UperNetConfig
__lowerCAmelCase :str = "pixel_values"
__lowerCAmelCase :int = True
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Dict:
"""simple docstring"""
if isinstance(__lowercase , __lowercase ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase=False ) -> List[str]:
"""simple docstring"""
if isinstance(__lowercase , __lowercase ):
a__ : Dict = value
_lowercase : Union[str, Any] =r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase : List[Any] =r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , A__ , )
class snake_case__ (A__ ):
"""simple docstring"""
def __init__( self , __lowercase ) -> List[Any]:
"""simple docstring"""
super().__init__(__lowercase )
a__ : Optional[Any] = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
a__ : Tuple = UperNetHead(__lowercase , in_channels=self.backbone.channels )
a__ : Union[str, Any] = UperNetFCNHead(__lowercase ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) )
@replace_return_docstrings(output_type=__lowercase , config_class=_CONFIG_FOR_DOC )
def SCREAMING_SNAKE_CASE__( self , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , ) -> Union[tuple, SemanticSegmenterOutput]:
"""simple docstring"""
a__ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
a__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a__ : Union[str, Any] = output_attentions if output_attentions is not None else self.config.output_attentions
a__ : int = self.backbone.forward_with_filtered_kwargs(
__lowercase , output_hidden_states=__lowercase , output_attentions=__lowercase )
a__ : Any = outputs.feature_maps
a__ : Tuple = self.decode_head(__lowercase )
a__ : Any = nn.functional.interpolate(__lowercase , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=__lowercase )
a__ : Tuple = None
if self.auxiliary_head is not None:
a__ : Dict = self.auxiliary_head(__lowercase )
a__ : int = nn.functional.interpolate(
__lowercase , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=__lowercase )
a__ : List[Any] = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("""The number of labels should be greater than one""" )
else:
# compute weighted loss
a__ : Optional[Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
a__ : int = loss_fct(__lowercase , __lowercase )
a__ : List[Any] = loss_fct(__lowercase , __lowercase )
a__ : Optional[int] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
a__ : Tuple = (logits,) + outputs[1:]
else:
a__ : Union[str, Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=__lowercase , logits=__lowercase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 357 |
def lowerCAmelCase_ ( _lowercase : int) -> int:
    """simple docstring"""
    if not isinstance(_lowercase , int):
        raise TypeError("""only integers accepted as input""")
    num_string = str(abs(_lowercase))
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)
    return max(
        int("""""".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("doctest").testmod()
| 266 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : str=13 , lowerCAmelCase_ : List[str]=7 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Tuple=99 , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : List[str]=37 , lowerCAmelCase_ : Tuple="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Any=5_12 , lowerCAmelCase_ : Union[str, Any]=16 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : str=0.02 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[str]="None" , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Tuple=4 , lowerCAmelCase_ : Tuple=None , ) -> Optional[Any]:
'''simple docstring'''
A__ : Any =parent
A__ : Any =batch_size
A__ : Tuple =seq_length
A__ : Tuple =is_training
A__ : List[Any] =use_input_mask
A__ : int =use_token_type_ids
A__ : List[Any] =use_labels
A__ : List[str] =vocab_size
A__ : Optional[int] =hidden_size
A__ : Optional[int] =num_hidden_layers
A__ : str =num_attention_heads
A__ : Tuple =intermediate_size
A__ : Union[str, Any] =hidden_act
A__ : Tuple =hidden_dropout_prob
A__ : Optional[Any] =attention_probs_dropout_prob
A__ : Tuple =max_position_embeddings
A__ : str =type_vocab_size
A__ : Dict =type_sequence_label_size
A__ : List[Any] =initializer_range
A__ : Any =num_labels
A__ : Optional[Any] =num_choices
A__ : Optional[int] =relative_attention
A__ : Tuple =position_biased_input
A__ : Union[str, Any] =pos_att_type
A__ : Union[str, Any] =scope
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
A__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple =None
if self.use_input_mask:
A__ : Dict =random_attention_mask([self.batch_size, self.seq_length] )
A__ : Dict =None
if self.use_token_type_ids:
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : Union[str, Any] =None
A__ : List[Any] =None
A__ : Optional[Any] =None
if self.use_labels:
A__ : str =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : List[Any] =DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=lowerCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
A__ : List[str] =TFDebertaVaModel(config=lowerCAmelCase_ )
A__ : Any ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A__ : List[str] =[input_ids, input_mask]
A__ : Optional[int] =model(lowerCAmelCase_ )
A__ : Union[str, Any] =model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple ) -> Optional[int]:
'''simple docstring'''
A__ : List[Any] =TFDebertaVaForMaskedLM(config=lowerCAmelCase_ )
A__ : Optional[int] ={
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A__ : List[str] =model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict ) -> List[str]:
'''simple docstring'''
A__ : Any =self.num_labels
A__ : List[str] =TFDebertaVaForSequenceClassification(config=lowerCAmelCase_ )
A__ : Optional[int] ={
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A__ : int =model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict ) -> Any:
'''simple docstring'''
A__ : int =self.num_labels
A__ : Tuple =TFDebertaVaForTokenClassification(config=lowerCAmelCase_ )
A__ : int ={
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A__ : str =model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str ) -> List[str]:
'''simple docstring'''
A__ : Optional[Any] =TFDebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
A__ : List[Any] ={
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A__ : Union[str, Any] =model(lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase ( A_ , A_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
__snake_case = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case = False
__snake_case = False
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
A__ : Any =TFDebertaVaModelTester(self )
A__ : str =ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def lowercase__ ( self : Dict ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : Any ) -> List[str]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase__ ( self : int ) -> str:
'''simple docstring'''
A__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
A__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
A__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
A__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )
@slow
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
        model = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
        self.assertIsNotNone(model )
@require_tf
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="""Model not available yet""" )
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
pass
@slow
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
        model = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
        input_ids = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 )
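        # Sketch of the tolerance check used above (assumption: the toy tensors below stand
        # in for real model output; assert_near passes silently and raises on any element
        # that differs by more than atol):
        #
        #     a = tf.constant([[0.2356, 0.1948], [-0.1063, 0.3586]])
        #     b = a + 5e-5                      # well within atol=1e-4
        #     tf.debugging.assert_near(a, b, atol=1e-4)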
| 134 |
from math import isqrt
def is_prime( number ) -> bool:
    '''simple docstring'''
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
def solution( max_prime = 1_0**6 ) -> int:
    '''simple docstring'''
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
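# Why the loop above works (assumption: the goal is counting primes that are differences of
# consecutive cubes): (n + 1)**3 - n**3 == 3*n*n + 3*n + 1, and that value grows by 6*n from
# one n to the next, so `prime_candidate += 6 * cube_index` walks exactly through the
# candidate sequence 7, 19, 37, 61, ... starting from n = 1.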
if __name__ == "__main__":
print(F'{solution() = }')
| 295 | 0 |
import random
class Onepad :
    @staticmethod
    def encrypt( text : str ) -> tuple[list[int], list[int]]:
        plain = [ord(i ) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1 , 300 )
            encrypted = (i + k) * k
            cipher.append(encrypted )
            key.append(k )
        return cipher, key
    @staticmethod
    def decrypt( cipher : list[int] , key : list[int] ) -> str:
        plain = []
        for i in range(len(key ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
if __name__ == "__main__":
    c , k = Onepad().encrypt('''Hello''')
print(c, k)
print(Onepad().decrypt(c, k))
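    # Round-trip sanity check (assumption: decrypt inverts encrypt, since
    # ((i + k) * k - k * k) / k == i for every key k > 0):
    assert Onepad().decrypt(c, k) == "Hello"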
| 358 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
__A = logging.getLogger(__name__)
@dataclass
class lowercase_ :
UpperCamelCase_ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCamelCase_ : Optional[str] = field(
default=__lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCamelCase_ : Optional[str] = field(
default=__lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCamelCase_ : Optional[str] = field(
default=__lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
UpperCamelCase_ : bool = field(default=__lowercase , metadata={"help": "Whether tp freeze the encoder."} )
UpperCamelCase_ : bool = field(default=__lowercase , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class lowercase_ :
UpperCamelCase_ : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
UpperCamelCase_ : Optional[str] = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
UpperCamelCase_ : Optional[int] = field(
default=1_0_2_4 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=1_2_8 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=1_4_2 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=1_4_2 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCamelCase_ : Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
UpperCamelCase_ : Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
UpperCamelCase_ : Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
UpperCamelCase_ : Optional[str] = field(default=__lowercase , metadata={"help": "Source language id for translation."} )
UpperCamelCase_ : Optional[str] = field(default=__lowercase , metadata={"help": "Target language id for translation."} )
UpperCamelCase_ : Optional[int] = field(default=__lowercase , metadata={"help": "# num_beams to use for evaluation."} )
UpperCamelCase_ : bool = field(
default=__lowercase , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def handle_metrics(split , metrics , output_dir ) -> None:
    """simple docstring"""
    logger.info(F"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(F"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , F"""{split}_results.json""" ) )
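# For reference, a minimal stand-in for the `save_json` helper imported above (assumption:
# it simply serializes the metrics dict to the given path as JSON):
#
#     import json
#
#     def save_json(content, path, indent=4):
#         with open(path, "w") as f:
#             json.dump(content, f, indent=indent)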
def main():
"""simple docstring"""
_snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_snake_case, _snake_case, _snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_snake_case, _snake_case, _snake_case = parser.parse_args_into_dataclasses()
check_output_dir(_UpperCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , _UpperCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_snake_case = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
assert hasattr(_UpperCamelCase , _UpperCamelCase ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(_UpperCamelCase , _UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
_snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=_UpperCamelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_UpperCamelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_snake_case = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_UpperCamelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_UpperCamelCase , _UpperCamelCase ):
_snake_case = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_snake_case = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_UpperCamelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
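    # Sketch of the helpers used just above (assumption: `freeze_params` flips requires_grad
    # off so the encoder stays fixed, and `assert_all_frozen` double-checks that):
    #
    #     def freeze_params(model):
    #         for par in model.parameters():
    #             par.requires_grad = False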
_snake_case = SeqaSeqDataset
# Get datasets
_snake_case = (
dataset_class(
_UpperCamelCase , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
_snake_case = (
dataset_class(
_UpperCamelCase , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_snake_case = (
dataset_class(
_UpperCamelCase , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_snake_case = (
build_compute_metrics_fn(data_args.task , _UpperCamelCase ) if training_args.predict_with_generate else None
)
_snake_case = SeqaSeqTrainer(
model=_UpperCamelCase , args=_UpperCamelCase , data_args=_UpperCamelCase , train_dataset=_UpperCamelCase , eval_dataset=_UpperCamelCase , data_collator=SeqaSeqDataCollator(
_UpperCamelCase , _UpperCamelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
_snake_case = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
_snake_case = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_snake_case = train_result.metrics
_snake_case = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , _UpperCamelCase , training_args.output_dir )
all_metrics.update(_UpperCamelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_snake_case = trainer.evaluate(metric_key_prefix='''val''' )
_snake_case = data_args.n_val
_snake_case = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , _UpperCamelCase , training_args.output_dir )
all_metrics.update(_UpperCamelCase )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
_snake_case = trainer.predict(test_dataset=_UpperCamelCase , metric_key_prefix='''test''' )
_snake_case = test_output.metrics
_snake_case = data_args.n_test
if trainer.is_world_process_zero():
_snake_case = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , _UpperCamelCase , training_args.output_dir )
all_metrics.update(_UpperCamelCase )
if training_args.predict_with_generate:
_snake_case = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
_snake_case = lmap(str.strip , _UpperCamelCase )
write_txt_file(_UpperCamelCase , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(_UpperCamelCase , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def _mp_fn(index ) -> None:
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
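# Typical invocation (assumption: the script name and every path/flag below are illustrative
# placeholders, not the documented interface):
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./data \
#       --output_dir ./out --do_train --do_eval --task translation --src_lang en --tgt_lang ro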
| 278 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__snake_case = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main() -> None:
    '''simple docstring'''
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
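# Intended to run on a schedule in CI (assumption: the file name below is illustrative);
# locally it needs a token in the environment:
#   GITHUB_TOKEN=<personal-access-token> python close_stale_issues.py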
if __name__ == "__main__":
    main()
| 97 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__snake_case = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
__snake_case = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
__snake_case = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
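    # Cross-check of the scipy call in `_compute` below (assumption: Pearson r equals the
    # off-diagonal entry of the 2x2 correlation matrix, so numpy reproduces the docstring
    # example):
    #
    #     import numpy as np
    #
    #     r = np.corrcoef([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])[0, 1]
    #     print(round(float(r), 2))   # -0.74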
    def _compute( self , predictions , references , return_pvalue=False ):
        '''simple docstring'''
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 97 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
UpperCamelCase__ : Tuple = trt.Logger(trt.Logger.WARNING)
UpperCamelCase__ : Dict = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
UpperCamelCase__ : List[Any] = logging.getLogger(__name__)
UpperCamelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=384,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=128,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
UpperCamelCase__ : int = parser.parse_args()
if args.tokenizer_name:
UpperCamelCase__ : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name."""
)
logger.info("""Training/evaluation parameters %s""", args)
UpperCamelCase__ : Optional[int] = args.per_device_eval_batch_size
UpperCamelCase__ : List[str] = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
UpperCamelCase__ : Any = True
UpperCamelCase__ : List[Any] = 'temp_engine/bert-fp32.engine'
if args.fpaa:
UpperCamelCase__ : Dict = 'temp_engine/bert-fp16.engine'
if args.inta:
UpperCamelCase__ : int = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
UpperCamelCase__ : Dict = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
UpperCamelCase__ : Tuple = [network.get_input(i) for i in range(network.num_inputs)]
UpperCamelCase__ : int = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
UpperCamelCase__ : Optional[Any] = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
UpperCamelCase__ : Dict = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
UpperCamelCase__ : List[Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def model_infer(inputs , context , d_inputs , h_output0 , h_output1 , d_output0 , d_output1 , stream ):
    """simple docstring"""
    input_ids = np.asarray(inputs['''input_ids'''], dtype=np.int32 )
    attention_mask = np.asarray(inputs['''attention_mask'''], dtype=np.int32 )
    token_type_ids = np.asarray(inputs['''token_type_ids'''], dtype=np.int32 )
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream )
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream )
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream )
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(d_output0 ), int(d_output1 )], stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream )
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
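# The function above follows the standard TensorRT + PyCUDA async pattern:
#   1. cuda.memcpy_htod_async  - stage host inputs into the device input buffers
#   2. context.execute_async   - enqueue the engine on the CUDA stream
#   3. cuda.memcpy_dtoh_async  - stage start/end logits back into pinned host memory
#   4. stream.synchronize      - block until the stream drains, so the timing delta is valid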
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
UpperCamelCase__ : Dict = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
UpperCamelCase__ : Optional[Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
UpperCamelCase__ : Any = raw_datasets['validation'].column_names
UpperCamelCase__ : Any = 'question' if 'question' in column_names else column_names[0]
UpperCamelCase__ : Optional[int] = 'context' if 'context' in column_names else column_names[1]
UpperCamelCase__ : Tuple = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
UpperCamelCase__ : Optional[Any] = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
UpperCamelCase__ : int = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples ):
    """simple docstring"""
    # Some questions have extra whitespace on the left, which eats into the length budget
    # for the context, so strip it before tokenizing.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation='''only_second''' if pad_on_right else '''only_first''', max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding='''max_length''', )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('''overflow_to_sample_mapping''' )
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples['''example_id'''] = []
    for i in range(len(tokenized_examples['''input_ids'''] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i )
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples['''id'''][sample_index] )
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['''offset_mapping'''][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] )
        ]
    return tokenized_examples
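# A small sketch of the stride/overflow behavior relied on above (assumption: any fast
# tokenizer behaves this way; the numbers are illustrative):
#
#     enc = tokenizer("question?", "a very long context " * 400, truncation="only_second",
#                     max_length=384, stride=128, return_overflowing_tokens=True,
#                     return_offsets_mapping=True, padding="max_length")
#     len(enc["input_ids"])   # > 1: the long context spills over into extra features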
UpperCamelCase__ : Optional[Any] = raw_datasets['validation']
# Validation Feature Creation
UpperCamelCase__ : List[Any] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="""Running tokenizer on validation dataset""",
)
UpperCamelCase__ : int = default_data_collator
UpperCamelCase__ : Optional[Any] = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""])
UpperCamelCase__ : Tuple = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples , features , predictions , stage="eval" ):
    """simple docstring"""
    # Post-processing: match the start and end logits back to answer spans in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples , features=features , predictions=predictions , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=stage , )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
    references = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions , label_ids=references )
UpperCamelCase__ : Optional[int] = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding ) -> int:
        """simple docstring"""
        return trt.volume(engine.get_binding_shape(binding ) ) * engine.get_binding_dtype(binding ).itemsize
# Allocate device memory for inputs and outputs.
UpperCamelCase__ : Union[str, Any] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
UpperCamelCase__ : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
UpperCamelCase__ : int = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
UpperCamelCase__ : Any = cuda.mem_alloc(h_outputa.nbytes)
UpperCamelCase__ : str = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
UpperCamelCase__ : List[Any] = cuda.Stream()
# Evaluation
logger.info("""***** Running Evaluation *****""")
logger.info(F" Num examples = {len(eval_dataset)}")
logger.info(F" Batch size = {args.per_device_eval_batch_size}")
UpperCamelCase__ : Dict = 0.0
UpperCamelCase__ : str = 0
UpperCamelCase__ : Optional[Any] = timeit.default_timer()
UpperCamelCase__ : Union[str, Any] = None
for step, batch in enumerate(eval_dataloader):
UpperCamelCase__ : Tuple = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
UpperCamelCase__ : Union[str, Any] = outputs
UpperCamelCase__ : Tuple = torch.tensor(start_logits)
UpperCamelCase__ : Tuple = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
UpperCamelCase__ : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
UpperCamelCase__ : List[str] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
UpperCamelCase__ : Tuple = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
UpperCamelCase__ : str = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
UpperCamelCase__ : List[str] = nested_truncate(all_preds, len(eval_dataset))
UpperCamelCase__ : List[str] = timeit.default_timer() - start_time
logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1_000 / niter))
logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1_000))
logger.info("""Total Number of Inference = %d""", niter)
UpperCamelCase__ : int = post_processing_function(eval_examples, eval_dataset, all_preds)
UpperCamelCase__ : Tuple = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"Evaluation metrics: {eval_metric}")
| 355 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
    """studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
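# Usage sketch, assuming this class ships as `transformers.LukeConfig`; the
# model below is randomly initialized and the small sizes are illustrative only.
if __name__ == "__main__":
    from transformers import LukeModel

    config = LukeConfig(hidden_size=256, num_hidden_layers=2, num_attention_heads=4, entity_vocab_size=10_000, entity_emb_size=64)
    print(config.entity_vocab_size)  # 10000 here; 500000 by default
    model = LukeModel(config)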
| 330 | 0 |
import datasets
from .evaluate import evaluate
a__ = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100,000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
a__ = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
a__ = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly matches the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    '''simple docstring'''

    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string"""), """prediction_text""": datasets.Value("""string""")},
"""references""": {
"""id""": datasets.Value("""string"""),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string"""),
"""answer_start""": datasets.Value("""int32"""),
}),
},
}) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def _compute(self, predictions, references):
        """simple docstring"""
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 317 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase : Tuple , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : int=1 , lowerCAmelCase : List[Any]=False) -> str:
"""simple docstring"""
super().__init__()
_snake_case : List[str] = n_token
_snake_case : Any = d_embed
_snake_case : List[str] = d_proj
_snake_case : Optional[int] = cutoffs + [n_token]
_snake_case : Dict = [0] + self.cutoffs
_snake_case : Optional[Any] = div_val
_snake_case : Tuple = self.cutoffs[0]
_snake_case : List[str] = len(self.cutoffs) - 1
_snake_case : str = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
_snake_case : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed))
_snake_case : Any = nn.Parameter(torch.zeros(self.n_clusters))
_snake_case : Tuple = nn.ModuleList()
_snake_case : int = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase , lowerCAmelCase)))
else:
self.out_projs.append(lowerCAmelCase)
self.out_layers.append(nn.Linear(lowerCAmelCase , lowerCAmelCase))
else:
for i in range(len(self.cutoffs)):
_snake_case , _snake_case : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case : Dict = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase , lowerCAmelCase)))
self.out_layers.append(nn.Linear(lowerCAmelCase , r_idx - l_idx))
_snake_case : Tuple = keep_order
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int]) -> List[str]:
"""simple docstring"""
if proj is None:
_snake_case : List[Any] = nn.functional.linear(lowerCAmelCase , lowerCAmelCase , bias=lowerCAmelCase)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
_snake_case : List[str] = nn.functional.linear(lowerCAmelCase , proj.t().contiguous())
_snake_case : Optional[int] = nn.functional.linear(lowerCAmelCase , lowerCAmelCase , bias=lowerCAmelCase)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : int=False) -> Tuple:
"""simple docstring"""
if labels is not None:
# Shift so that tokens < n predict n
_snake_case : List[str] = hidden[..., :-1, :].contiguous()
_snake_case : int = labels[..., 1:].contiguous()
_snake_case : int = hidden.view(-1 , hidden.size(-1))
_snake_case : str = labels.view(-1)
if hidden.size(0) != labels.size(0):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""")
else:
_snake_case : List[Any] = hidden.view(-1 , hidden.size(-1))
if self.n_clusters == 0:
_snake_case : int = self._compute_logit(lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
if labels is not None:
_snake_case : Optional[int] = labels != -100
_snake_case : Union[str, Any] = torch.zeros_like(lowerCAmelCase , dtype=hidden.dtype , device=hidden.device)
_snake_case : Union[str, Any] = (
-nn.functional.log_softmax(lowerCAmelCase , dim=-1)[mask].gather(1 , labels[mask].unsqueeze(1)).squeeze(1)
)
else:
_snake_case : Optional[int] = nn.functional.log_softmax(lowerCAmelCase , dim=-1)
else:
# construct weights and biases
_snake_case , _snake_case : Optional[int] = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
_snake_case , _snake_case : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case : Dict = self.out_layers[0].weight[l_idx:r_idx]
_snake_case : Tuple = self.out_layers[0].bias[l_idx:r_idx]
else:
_snake_case : Any = self.out_layers[i].weight
_snake_case : Optional[int] = self.out_layers[i].bias
if i == 0:
_snake_case : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0)
_snake_case : List[str] = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(lowerCAmelCase)
biases.append(lowerCAmelCase)
_snake_case , _snake_case , _snake_case : List[Any] = weights[0], biases[0], self.out_projs[0]
_snake_case : List[str] = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
_snake_case : Dict = nn.functional.log_softmax(lowerCAmelCase , dim=1)
if labels is None:
_snake_case : List[Any] = hidden.new_empty((head_logit.size(0), self.n_token))
else:
_snake_case : Optional[Any] = torch.zeros_like(lowerCAmelCase , dtype=hidden.dtype , device=hidden.device)
_snake_case : Optional[int] = 0
_snake_case : Union[str, Any] = [0] + self.cutoffs
for i in range(len(lowerCAmelCase) - 1):
_snake_case , _snake_case : Any = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_snake_case : Optional[int] = (labels >= l_idx) & (labels < r_idx)
_snake_case : Dict = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_snake_case : Dict = labels.index_select(0 , lowerCAmelCase) - l_idx
_snake_case : List[Any] = head_logprob.index_select(0 , lowerCAmelCase)
_snake_case : Dict = hidden.index_select(0 , lowerCAmelCase)
else:
_snake_case : Optional[Any] = hidden
if i == 0:
if labels is not None:
_snake_case : str = head_logprob_i.gather(1 , target_i[:, None]).squeeze(1)
else:
_snake_case : int = head_logprob[:, : self.cutoffs[0]]
else:
_snake_case , _snake_case , _snake_case : Dict = weights[i], biases[i], self.out_projs[i]
_snake_case : int = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
_snake_case : List[str] = nn.functional.log_softmax(lowerCAmelCase , dim=1)
_snake_case : str = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_snake_case : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None]).squeeze(1)
else:
_snake_case : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_snake_case : int = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""") and self.keep_order) or keep_order:
out.index_copy_(0 , lowerCAmelCase , -logprob_i)
else:
out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return out
def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : Optional[int]) -> Tuple:
"""simple docstring"""
if self.n_clusters == 0:
_snake_case : Optional[Any] = self._compute_logit(lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
return nn.functional.log_softmax(lowerCAmelCase , dim=-1)
else:
# construct weights and biases
_snake_case , _snake_case : Optional[int] = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
_snake_case , _snake_case : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case : Optional[Any] = self.out_layers[0].weight[l_idx:r_idx]
_snake_case : Union[str, Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
_snake_case : Tuple = self.out_layers[i].weight
_snake_case : Any = self.out_layers[i].bias
if i == 0:
_snake_case : Tuple = torch.cat([weight_i, self.cluster_weight] , dim=0)
_snake_case : Optional[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(lowerCAmelCase)
biases.append(lowerCAmelCase)
_snake_case , _snake_case , _snake_case : int = weights[0], biases[0], self.out_projs[0]
_snake_case : Union[str, Any] = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
_snake_case : Any = hidden.new_empty((head_logit.size(0), self.n_token))
_snake_case : Optional[Any] = nn.functional.log_softmax(lowerCAmelCase , dim=1)
_snake_case : List[Any] = [0] + self.cutoffs
for i in range(len(lowerCAmelCase) - 1):
_snake_case , _snake_case : Any = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_snake_case : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
_snake_case , _snake_case , _snake_case : str = weights[i], biases[i], self.out_projs[i]
_snake_case : List[str] = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
_snake_case : str = nn.functional.log_softmax(lowerCAmelCase , dim=1)
_snake_case : Dict = head_logprob[:, -i] + tail_logprob_i
_snake_case : Any = logprob_i
return out
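# A driving sketch, assuming the module above behaves like the upstream
# Transformer-XL ProjectedAdaptiveLogSoftmax it mirrors: `cutoffs` split the
# vocabulary into a frequent head (ids < 1000 here) and two rarer tail clusters.
if __name__ == "__main__":
    crit = ProjectedAdaptiveLogSoftmax(n_token=10_000, d_embed=64, d_proj=64, cutoffs=[1_000, 4_000])
    hidden = torch.randn(2, 16, 64)             # (batch, seq_len, d_proj)
    labels = torch.randint(0, 10_000, (2, 16))  # next-token targets
    nll = crit(hidden, labels=labels)           # shape (2 * 15,): per-position NLL on shifted targets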
| 317 | 1 |
'''simple docstring'''
def count_divisors(n: int) -> int:
    '''Count the divisors of n from its prime factorization.'''
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
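# Sanity check: d(n) = prod(a_i + 1) over the factorization n = prod(p_i ** a_i),
# e.g. 28 = 2**2 * 7 has (2 + 1) * (1 + 1) = 6 divisors.
assert count_divisors(28) == 6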
def solution() -> int:
    '''Return the first triangular number with more than 500 divisors.'''
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution()) | 240 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    '''Map a ParlAI state-dict key onto the Hugging Face naming scheme.'''
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith('''encoder'''):
        k = k.replace('''.attn''', '''.self_attn''')
        k = k.replace('''norm1''', '''self_attn_layer_norm''')
        k = k.replace('''norm2''', '''final_layer_norm''')
    elif k.startswith('''decoder'''):
        k = k.replace('''norm1''', '''self_attn_layer_norm''')
        k = k.replace('''norm2''', '''encoder_attn_layer_norm''')
        k = k.replace('''norm3''', '''final_layer_norm''')
    return k
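# One hypothetical ParlAI key walked through the rules above: PATTERNS maps
# "attention" -> "attn" and "q_lin" -> "q_proj", then the encoder branch
# rewrites ".attn" -> ".self_attn".
assert rename_state_dict_key("encoder.layers.0.attention.q_lin.weight") == "encoder.layers.0.self_attn.q_proj.weight"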
def rename_layernorm_keys(sd):
    '''Rename the four layernorm_embedding keys in place to layer_norm.'''
    keys = [
        '''model.encoder.layernorm_embedding.weight''',
        '''model.encoder.layernorm_embedding.bias''',
        '''model.decoder.layernorm_embedding.weight''',
        '''model.decoder.layernorm_embedding.bias''',
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace('''layernorm_embedding''', '''layer_norm''')
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    '''Convert a ParlAI Blenderbot checkpoint into a Hugging Face one.'''
    model = torch.load(checkpoint_path, map_location='''cpu''')
    sd = model['''model''']
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 240 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
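# Usage sketch, assuming this class ships as `transformers.MgpstrConfig`; the
# model below is randomly initialized and only illustrates wiring the config in.
if __name__ == "__main__":
    from transformers import MgpstrForSceneTextRecognition

    config = MgpstrConfig(max_token_length=27, num_hidden_layers=2)
    model = MgpstrForSceneTextRecognition(config)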
| 219 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
__lowerCamelCase : str = input('''Enter image url: ''').strip()
print(F"""Downloading image from {url} ...""")
__lowerCamelCase : Any = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
__lowerCamelCase : List[Any] = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
__lowerCamelCase : Tuple = requests.get(image_url).content
__lowerCamelCase : Union[str, Any] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 219 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
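# Effect of the _LazyModule indirection above, sketched from the importing side
# (assuming this file is transformers/models/gpt_neox/__init__.py):
#   import transformers.models.gpt_neox as gpt_neox  # cheap: nothing heavy imported yet
#   gpt_neox.GPTNeoXConfig                           # first access loads configuration_gpt_neox
#   gpt_neox.GPTNeoXForCausalLM                      # first access loads the torch-backed modeling module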
| 357 |
def lowerCAmelCase_ ( _lowercase : int) -> int:
    """Return the largest number obtainable by deleting exactly one digit."""
    if not isinstance(_lowercase , int):
        raise TypeError("""only integers accepted as input""")
    num_string = str(abs(_lowercase))
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)
    return max(
        int("""""".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("doctest").testmod()
| 266 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= vocab_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= intermediate_size
__lowercase= hidden_act
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= scope
__lowercase= self.vocab_size - 1
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__lowercase= ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= OpenAIGPTModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , head_mask=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= OpenAIGPTLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= OpenAIGPTDoubleHeadsModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= OpenAIGPTForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self ):
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    head_mask,
    token_type_ids,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : Optional[Any] =(
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
UpperCamelCase_ : Tuple =(
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
UpperCamelCase_ : List[str] =(
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__lowercase= torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase , )
__lowercase= inputs_dict['labels']
__lowercase= inputs_dict['labels']
__lowercase= torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase , )
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
return inputs_dict
def _A (self ):
__lowercase= OpenAIGPTModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , n_embd=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase )
@slow
def _A (self ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= OpenAIGPTModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=lowerCAmelCase ) # the president is
__lowercase= [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase )
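# The integration test above amounts to greedy decoding; a hedged equivalent
# using the public API (downloads the `openai-gpt` checkpoint):
if __name__ == "__main__":
    from transformers import OpenAIGPTTokenizer

    tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
    model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
    input_ids = tokenizer("the president is", return_tensors="pt").input_ids  # [[481, 4735, 544]]
    output_ids = model.generate(input_ids, do_sample=False)  # greedy
    print(tokenizer.decode(output_ids[0]))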
| 295 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
@register_to_config
def __init__(self , lowerCAmelCase = 3 , lowerCAmelCase = 3 , lowerCAmelCase = ("DownEncoderBlock2D",) , lowerCAmelCase = ("UpDecoderBlock2D",) , lowerCAmelCase = (6_4,) , lowerCAmelCase = 1 , lowerCAmelCase = "silu" , lowerCAmelCase = 3 , lowerCAmelCase = 3_2 , lowerCAmelCase = 2_5_6 , lowerCAmelCase = 3_2 , lowerCAmelCase = None , lowerCAmelCase = 0.1_82_15 , lowerCAmelCase = "group" , ):
super().__init__()
# pass init params to Encoder
__lowercase= Encoder(
in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , down_block_types=lowerCAmelCase , block_out_channels=lowerCAmelCase , layers_per_block=lowerCAmelCase , act_fn=lowerCAmelCase , norm_num_groups=lowerCAmelCase , double_z=lowerCAmelCase , )
__lowercase= vq_embed_dim if vq_embed_dim is not None else latent_channels
__lowercase= nn.Conv2d(lowerCAmelCase , lowerCAmelCase , 1 )
__lowercase= VectorQuantizer(lowerCAmelCase , lowerCAmelCase , beta=0.25 , remap=lowerCAmelCase , sane_index_shape=lowerCAmelCase )
__lowercase= nn.Conv2d(lowerCAmelCase , lowerCAmelCase , 1 )
# pass init params to Decoder
__lowercase= Decoder(
in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , up_block_types=lowerCAmelCase , block_out_channels=lowerCAmelCase , layers_per_block=lowerCAmelCase , act_fn=lowerCAmelCase , norm_num_groups=lowerCAmelCase , norm_type=lowerCAmelCase , )
@apply_forward_hook
def _A (self , lowerCAmelCase , lowerCAmelCase = True ):
__lowercase= self.encoder(lowerCAmelCase )
__lowercase= self.quant_conv(lowerCAmelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCAmelCase )
@apply_forward_hook
def _A (self , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = True ):
# also go through quantization layer
if not force_not_quantize:
__lowercase, __lowercase, __lowercase= self.quantize(lowerCAmelCase )
else:
__lowercase= h
__lowercase= self.post_quant_conv(lowerCAmelCase )
__lowercase= self.decoder(lowerCAmelCase , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = True ):
__lowercase= sample
__lowercase= self.encode(lowerCAmelCase ).latents
__lowercase= self.decode(lowerCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase )
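# Smoke-test sketch of the encode -> quantize -> decode round trip above; the
# defaults (a single block with no downsampling) keep the spatial size unchanged.
if __name__ == "__main__":
    model = VQModel()
    sample = torch.randn(1, 3, 32, 32)
    reconstruction = model(sample).sample
    print(reconstruction.shape)  # torch.Size([1, 3, 32, 32])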
| 295 | 1 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    '''Resolve a force given by magnitude and angle into (x, y) components.'''
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    '''Check whether the net moment of the forces about the origin vanishes.'''
    # with 2-D vectors, cross() returns the scalar z-component of each moment
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
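# A single force of 1 N in +y applied at x = 2 m has a net moment of 2 N*m
# about the origin, which exceeds eps = 0.1, so it is not in equilibrium alone:
assert not in_static_equilibrium(array([[0.0, 1.0]]), array([[2.0, 0.0]]))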
if __name__ == "__main__":
# Test to check if it works
__UpperCAmelCase = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
__UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
__UpperCAmelCase = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
__UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
__UpperCAmelCase = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
__UpperCAmelCase = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 1 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase_ :Optional[Any] = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :str = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase_ :int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase_ :List[Any] = 1_6
elif accelerator.mixed_precision != "no":
lowerCAmelCase_ :List[str] = 8
else:
lowerCAmelCase_ :Optional[int] = None
return tokenizer.pad(
lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCAmelCase = mocked_dataloaders # noqa: F811
def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1":
lowerCAmelCase_ :Optional[Any] = 2
# New Code #
lowerCAmelCase_ :List[str] = int(args.gradient_accumulation_steps )
lowerCAmelCase_ :int = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase_ :str = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :int = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase_ :Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase_ :Optional[Any] = AdamW(params=model.parameters() , lr=lowercase__ )
# Instantiate scheduler
lowerCAmelCase_ :Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
with LocalSGD(
accelerator=lowercase__ , model=lowercase__ , local_sgd_steps=lowercase__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase__ ):
lowerCAmelCase_ :str = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = output.loss
accelerator.backward(lowercase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
def _snake_case ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowercase__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=lowercase__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCAmelCase_ :Optional[Any] = parser.parse_args()
lowerCAmelCase_ :Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
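# Note on how the two mechanisms compose: with gradient_accumulation_steps = a
# and local_sgd_steps = s, each worker applies an optimizer step every a
# micro-batches, and parameters are averaged across workers every s applied
# steps, i.e. one synchronization per a * s micro-batches per worker
# (1 * 8 = 8 with the argparse defaults above).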
| 1 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
def __init__( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]="replace" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Union[str, Any]="<unk>" , __lowerCAmelCase : Any="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(
__lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , )
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space:
_lowerCamelCase : Dict = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : int = pre_tok_class(**__lowerCAmelCase )
_lowerCamelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = '''post_processor'''
_lowerCamelCase : List[str] = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase )
if tokenizer_component_instance:
_lowerCamelCase : int = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : Tuple = tuple(state['''sep'''] )
if "cls" in state:
_lowerCamelCase : int = tuple(state['''cls'''] )
_lowerCamelCase : Union[str, Any] = False
if state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space:
_lowerCamelCase : Dict = add_prefix_space
_lowerCamelCase : Optional[Any] = True
if state.get('''trim_offsets''' , __lowerCAmelCase ) != trim_offsets:
_lowerCamelCase : Any = trim_offsets
_lowerCamelCase : str = True
if changes_to_apply:
_lowerCamelCase : List[str] = getattr(__lowerCAmelCase , state.pop('''type''' ) )
_lowerCamelCase : str = component_class(**__lowerCAmelCase )
setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value
_lowerCamelCase : str = value
def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = kwargs.get('''is_split_into_words''' , __lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = kwargs.get('''is_split_into_words''' , __lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
_lowerCamelCase : Tuple = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase )
return tuple(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
_lowerCamelCase : List[str] = [self.sep_token_id]
_lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
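# Usage sketch, assuming this class ships as `transformers.BartTokenizerFast`:
if __name__ == "__main__":
    tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
    ids = tokenizer("Hello world").input_ids
    print(tokenizer.convert_ids_to_tokens(ids))  # e.g. ['<s>', 'Hello', 'Ġworld', '</s>']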
| 72 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_reformer'''] = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_reformer_fast'''] = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_reformer'''] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 | 1 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 257 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =None
UpperCAmelCase_ =None
@property
def _UpperCamelCase ( self ) -> Dict:
return self.feat_extract_tester.prepare_feat_extract_dict()
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_A , '''feature_size''' ) )
self.assertTrue(hasattr(_A , '''sampling_rate''' ) )
self.assertTrue(hasattr(_A , '''padding_value''' ) )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_A ) == len(_A ) for x, y in zip(_A , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_A )
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
SCREAMING_SNAKE_CASE_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_A )
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
SCREAMING_SNAKE_CASE_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_A )
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' )
SCREAMING_SNAKE_CASE_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _UpperCamelCase ( self , _A=False ) -> Tuple:
def _inputs_have_equal_length(_A ):
SCREAMING_SNAKE_CASE_ = len(input[0] )
for input_slice in input[1:]:
if len(_A ) != length:
return False
return True
def _inputs_are_equal(_A , _A ):
if len(_A ) != len(_A ):
return False
for input_slice_a, input_slice_a in zip(_A , _A ):
if not np.allclose(np.asarray(_A ) , np.asarray(_A ) , atol=1E-3 ):
return False
return True
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_common(numpify=_A )
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.seq_length_diff
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.max_seq_length + pad_diff
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.min_seq_length
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.batch_size
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_A , padding=_A )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_A , padding='''longest''' )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_A , padding='''max_length''' , max_length=len(speech_inputs[-1] ) )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_A ):
feat_extract.pad(_A , padding='''max_length''' )[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_A , padding='''max_length''' , max_length=_A , return_tensors='''np''' )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_A ) )
self.assertTrue(_inputs_have_equal_length(_A ) )
self.assertTrue(_inputs_have_equal_length(_A ) )
self.assertTrue(_inputs_are_equal(_A , _A ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_A , pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_A , padding='''longest''' , pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_A , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_A )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_A , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_A , return_tensors='''np''' , )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
self.assertTrue(all(len(x) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_A , _A ) )
SCREAMING_SNAKE_CASE_ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
SCREAMING_SNAKE_CASE_ = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def _UpperCamelCase ( self , _A=False ) -> Optional[int]:
def _inputs_have_equal_length(_A ):
SCREAMING_SNAKE_CASE_ = len(input[0] )
for input_slice in input[1:]:
if len(_A ) != length:
return False
return True
def _inputs_are_equal(_A , _A ):
if len(_A ) != len(_A ):
return False
for input_slice_a, input_slice_a in zip(_A , _A ):
if not np.allclose(np.asarray(_A ) , np.asarray(_A ) , atol=1E-3 ):
return False
return True
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_common(numpify=_A )
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_A , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=_A )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_A , padding='''max_length''' , max_length=len(speech_inputs[0] ) )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_A ) )
self.assertFalse(_inputs_have_equal_length(_A ) )
# truncate to smallest with np
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_A , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=_A , )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_A , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_A ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_A ) )
# truncate to middle
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_A , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=_A , return_tensors='''np''' , )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_A , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=_A )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_A , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_A ) )
self.assertTrue(_inputs_have_equal_length(_A ) )
self.assertTrue(_inputs_are_equal(_A , _A ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_A ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_A ):
feat_extract.pad(_A , truncation=_A )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_A ):
feat_extract.pad(_A , padding='''longest''' , truncation=_A )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_A ):
feat_extract.pad(_A , padding='''longest''' , truncation=_A )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_A ):
feat_extract.pad(_A , padding='''max_length''' , truncation=_A )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE_ = 12
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_A , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_A , truncation=_A , )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_A , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_A , )
SCREAMING_SNAKE_CASE_ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
SCREAMING_SNAKE_CASE_ = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
SCREAMING_SNAKE_CASE_ = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_A ) )
self.assertFalse(_inputs_have_equal_length(_A ) )
def _UpperCamelCase ( self ) -> Dict:
self._check_padding(numpify=_A )
def _UpperCamelCase ( self ) -> Dict:
self._check_padding(numpify=_A )
def _UpperCamelCase ( self ) -> List[str]:
self._check_truncation(numpify=_A )
def _UpperCamelCase ( self ) -> int:
self._check_truncation(numpify=_A )
@require_torch
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_A , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )
@require_tf
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_A , padding='''longest''' , return_tensors='''tf''' )[input_name]
self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1E-2 )
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = self.feat_extract_dict
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**_A )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE_ = [len(x) for x in speech_inputs]
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _A )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _A )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.feat_extract_dict
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**_A )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE_ = [len(x) for x in speech_inputs]
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = min(_A )
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _A )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
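# The `pad_to_multiple_of` assertions above reduce to one rounding rule; a
# standalone sketch of it (plain Python, no transformers dependency assumed):
def round_up_to_multiple(length: int, multiple: int) -> int:
    if length % multiple == 0:
        return length
    return (length // multiple + 1) * multiple


assert round_up_to_multiple(23, 10) == 30
assert round_up_to_multiple(30, 10) == 30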
| 257 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"

# Build
# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    # the tokenizer must be built while the vocab files still exist
    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
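# Hedged round-trip sketch: once uploaded as above, the tiny artifact loads
# like any hub model (repo id taken from the header comment; commented out
# because it hits the network):
# tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
# model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")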
| 219 |
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count that Google Scholar shows for a publication."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
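# Scholar's markup (the "gs_ri"/"gs_fl" classes) changes without notice, so
# the lookup above can raise AttributeError; a defensive variant of the same
# request (sketch):
def get_citation_safe(base_url: str, params: dict):
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    if div is None:
        return None
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text() if len(anchors) > 2 else None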
| 339 | 0 |
from __future__ import annotations


class Node:
    def __init__(self, data: int):
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.left.left = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
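# Two quick checks of the predicate above (sketch): a lone root is a full
# binary tree; adding exactly one child breaks the property.
def full_tree_demo() -> None:
    root = Node(1)
    assert is_full_binary_tree(root)
    root.left = Node(2)
    assert not is_full_binary_tree(root)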
| 367 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class __UpperCAmelCase :
def __init__( self : Any, __A : Optional[str] = None ):
UpperCAmelCase : str = (
os.path.join(__A, config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
UpperCAmelCase : str = Extractor
def __magic_name__ ( self : str, __A : str ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
UpperCAmelCase : int = os.path.abspath(__A )
return os.path.join(self.extract_dir, hash_url_to_filename(__A ) )
def __magic_name__ ( self : int, __A : str, __A : bool ):
return force_extract or (
not os.path.isfile(__A ) and not (os.path.isdir(__A ) and os.listdir(__A ))
)
def __magic_name__ ( self : str, __A : str, __A : bool = False ):
UpperCAmelCase : Any = self.extractor.infer_extractor_format(__A )
if not extractor_format:
return input_path
UpperCAmelCase : Tuple = self._get_output_path(__A )
if self._do_extract(__A, __A ):
self.extractor.extract(__A, __A, __A )
return output_path
class __UpperCAmelCase ( lowerCamelCase__ ):
@classmethod
@abstractmethod
def __magic_name__ ( cls : int, __A : Union[Path, str], **__A : List[Any] ):
...
@staticmethod
@abstractmethod
def __magic_name__ ( __A : Union[Path, str], __A : Union[Path, str] ):
...
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase = []
@staticmethod
def __magic_name__ ( __A : Union[Path, str], __A : int ):
with open(__A, '''rb''' ) as f:
return f.read(__A )
@classmethod
def __magic_name__ ( cls : List[str], __A : Union[Path, str], __A : bytes = b"" ):
if not magic_number:
UpperCAmelCase : int = max(len(__A ) for cls_magic_number in cls.magic_numbers )
try:
UpperCAmelCase : Any = cls.read_magic_number(__A, __A )
except OSError:
return False
return any(magic_number.startswith(__A ) for cls_magic_number in cls.magic_numbers )
class __UpperCAmelCase ( lowerCamelCase__ ):
@classmethod
def __magic_name__ ( cls : Union[str, Any], __A : Union[Path, str], **__A : int ):
return tarfile.is_tarfile(__A )
@staticmethod
def __magic_name__ ( __A : List[Any], __A : Any ):
def resolved(__A : str ) -> str:
return os.path.realpath(os.path.abspath(__A ) )
def badpath(__A : str, __A : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__A, __A ) ).startswith(__A )
def badlink(__A : List[Any], __A : str ) -> bool:
# Links are interpreted relative to the directory containing the link
UpperCAmelCase : Dict = resolved(os.path.join(__A, os.path.dirname(info.name ) ) )
return badpath(info.linkname, base=__A )
UpperCAmelCase : Any = resolved(__A )
for finfo in members:
if badpath(finfo.name, __A ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(__A, __A ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(__A, __A ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def __magic_name__ ( __A : Union[Path, str], __A : Union[Path, str] ):
os.makedirs(__A, exist_ok=__A )
UpperCAmelCase : Any = tarfile.open(__A )
tar_file.extractall(__A, members=TarExtractor.safemembers(__A, __A ) )
tar_file.close()
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = [B"""\x1F\x8B"""]
@staticmethod
def __magic_name__ ( __A : Union[Path, str], __A : Union[Path, str] ):
with gzip.open(__A, '''rb''' ) as gzip_file:
with open(__A, '''wb''' ) as extracted_file:
shutil.copyfileobj(__A, __A )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def __magic_name__ ( cls : Union[str, Any], __A : Union[Path, str], __A : bytes = b"" ):
if super().is_extractable(__A, magic_number=__A ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__A, '''rb''' ) as fp:
UpperCAmelCase : Union[str, Any] = _EndRecData(__A )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
UpperCAmelCase : Optional[int] = fp.read(__A ) # CD is where we expect it to be
if len(__A ) == sizeCentralDir:
UpperCAmelCase : List[Any] = struct.unpack(__A, __A ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def __magic_name__ ( __A : Union[Path, str], __A : Union[Path, str] ):
os.makedirs(__A, exist_ok=__A )
with zipfile.ZipFile(__A, '''r''' ) as zip_file:
zip_file.extractall(__A )
zip_file.close()
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def __magic_name__ ( __A : Union[Path, str], __A : Union[Path, str] ):
with lzma.open(__A ) as compressed_file:
with open(__A, '''wb''' ) as extracted_file:
shutil.copyfileobj(__A, __A )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def __magic_name__ ( __A : Union[Path, str], __A : Union[Path, str] ):
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(__A, exist_ok=__A )
UpperCAmelCase : Tuple = rarfile.RarFile(__A )
rf.extractall(__A )
rf.close()
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def __magic_name__ ( __A : Union[Path, str], __A : Union[Path, str] ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
UpperCAmelCase : List[str] = zstd.ZstdDecompressor()
with open(__A, '''rb''' ) as ifh, open(__A, '''wb''' ) as ofh:
dctx.copy_stream(__A, __A )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = [B"""\x42\x5A\x68"""]
@staticmethod
def __magic_name__ ( __A : Union[Path, str], __A : Union[Path, str] ):
with bz2.open(__A, '''rb''' ) as compressed_file:
with open(__A, '''wb''' ) as extracted_file:
shutil.copyfileobj(__A, __A )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def __magic_name__ ( __A : Union[Path, str], __A : Union[Path, str] ):
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
import py7zr
os.makedirs(__A, exist_ok=__A )
with py7zr.SevenZipFile(__A, '''r''' ) as archive:
archive.extractall(__A )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def __magic_name__ ( __A : Union[Path, str], __A : Union[Path, str] ):
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
import lz4.frame
with lz4.frame.open(__A, '''rb''' ) as compressed_file:
with open(__A, '''wb''' ) as extracted_file:
shutil.copyfileobj(__A, __A )
class __UpperCAmelCase :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
UpperCamelCase = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def __magic_name__ ( cls : Dict ):
return max(
len(__A )
for extractor in cls.extractors.values()
if issubclass(__A, __A )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def __magic_name__ ( __A : Union[Path, str], __A : int ):
try:
return MagicNumberBaseExtractor.read_magic_number(__A, magic_number_length=__A )
except OSError:
return b""
@classmethod
def __magic_name__ ( cls : Dict, __A : Union[Path, str], __A : bool = False ):
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''', category=__A, )
UpperCAmelCase : Dict = cls.infer_extractor_format(__A )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def __magic_name__ ( cls : Optional[Any], __A : Union[Path, str] ): # <Added version="2.4.0"/>
UpperCAmelCase : Tuple = cls._get_magic_number_max_length()
UpperCAmelCase : Tuple = cls._read_magic_number(__A, __A )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__A, magic_number=__A ):
return extractor_format
@classmethod
def __magic_name__ ( cls : Optional[Any], __A : Union[Path, str], __A : Union[Path, str], __A : Optional[str] = None, __A : Optional[BaseExtractor] = "deprecated", ):
os.makedirs(os.path.dirname(__A ), exist_ok=__A )
# Prevent parallel extractions
UpperCAmelCase : Optional[int] = str(Path(__A ).with_suffix('''.lock''' ) )
with FileLock(__A ):
shutil.rmtree(__A, ignore_errors=__A )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__A, __A ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''', category=__A, )
UpperCAmelCase : List[str] = extractor if extractor != '''deprecated''' else extractor_format
else:
UpperCAmelCase : Optional[int] = cls.extractors[extractor_format]
return extractor.extract(__A, __A )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''', category=__A, )
for extractor in cls.extractors.values():
if extractor.is_extractable(__A ):
return extractor.extract(__A, __A )
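# The registry above dispatches on magic numbers: the byte prefixes kept in
# each extractor's class attribute. A standalone sanity check (POSIX-only
# sketch) that a real gzip file starts with GzipExtractor's b"\x1F\x8B":
import gzip
import tempfile

with tempfile.NamedTemporaryFile(suffix=".gz") as tmp:
    with gzip.open(tmp.name, "wb") as gz:
        gz.write(b"hello")
    with open(tmp.name, "rb") as f:
        assert f.read(2) == b"\x1f\x8b"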
| 99 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
lowerCamelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , __A , __A ) -> Any:
super().__init__()
self.register_modules(unet=__A , scheduler=__A )
@torch.no_grad()
def __call__( self , __A = 1 , __A = 100 , __A = None , __A = None , __A = True , ) -> Union[AudioPipelineOutput, Tuple]:
if audio_length_in_s is None:
a =self.unet.config.sample_size / self.unet.config.sample_rate
a =audio_length_in_s * self.unet.config.sample_rate
a =2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
a =int(__A )
if sample_size % down_scale_factor != 0:
a =(
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
''' process.''' )
a =int(__A )
a =next(iter(self.unet.parameters() ) ).dtype
a =(batch_size, self.unet.config.in_channels, sample_size)
if isinstance(__A , __A ) and len(__A ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__A )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
a =randn_tensor(__A , generator=__A , device=self.device , dtype=__A )
# set step values
self.scheduler.set_timesteps(__A , device=audio.device )
a =self.scheduler.timesteps.to(__A )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
a =self.unet(__A , __A ).sample
# 2. compute previous image: x_t -> t_t-1
a =self.scheduler.step(__A , __A , __A ).prev_sample
a =audio.clamp(-1 , 1 ).float().cpu().numpy()
a =audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=__A ) | 81 |
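# (Re the audio diffusion pipeline above.) Standalone sketch of its length
# rule: the sample count is rounded up to a multiple of 2**len(up_blocks) so
# every down/upsampling stage divides evenly, then trimmed after denoising.
def round_up_sample_size(sample_size: int, down_scale_factor: int) -> int:
    if sample_size % down_scale_factor == 0:
        return sample_size
    return (sample_size // down_scale_factor + 1) * down_scale_factor


assert round_up_sample_size(100_000, 2**4) == 100_000
assert round_up_sample_size(100_001, 2**4) == 100_016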
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
 | 81 | 1 |
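# (Re the lazy __init__ above.) Minimal sketch of the same guarded-import
# pattern outside transformers; the module and flag names are illustrative:
try:
    import sentencepiece  # noqa: F401

    HAS_SENTENCEPIECE = True
except ImportError:
    HAS_SENTENCEPIECE = False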
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Dict = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
UpperCAmelCase__ : Tuple = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
UpperCAmelCase__ : List[Any] = model(_lowerCamelCase )["""last_hidden_state"""]
UpperCAmelCase__ : Optional[int] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , _lowerCamelCase )
# compare the actual values for a slice.
UpperCAmelCase__ : str = tf.convert_to_tensor(
[[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
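# The slice comparison above (a 3x3 corner at atol=1e-4, not the full
# tensor) tolerates tiny kernel-level numeric drift; a synthetic standalone
# illustration of why the tolerance matters:
import numpy as np

reference = np.zeros((1, 10, 768), dtype=np.float32)
drifted = reference + 5e-5  # below the 1e-4 tolerance
assert np.allclose(reference[:, :3, :3], drifted[:, :3, :3], atol=1e-4)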
| 166 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE = 'OwlViTImageProcessor'
SCREAMING_SNAKE_CASE = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__(self , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _lowerCamelCase , )
UpperCAmelCase__ : Optional[int] = kwargs.pop("""feature_extractor""" )
UpperCAmelCase__ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_lowerCamelCase , _lowerCamelCase )
def __call__(self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="max_length" , _lowerCamelCase="np" , **_lowerCamelCase ):
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(_lowerCamelCase , _lowerCamelCase ) or (isinstance(_lowerCamelCase , _lowerCamelCase ) and not isinstance(text[0] , _lowerCamelCase )):
UpperCAmelCase__ : Any = [self.tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )]
elif isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(text[0] , _lowerCamelCase ):
UpperCAmelCase__ : Any = []
# Maximum number of queries across batch
UpperCAmelCase__ : int = max([len(_lowerCamelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_lowerCamelCase ) != max_num_queries:
UpperCAmelCase__ : Optional[int] = t + [""" """] * (max_num_queries - len(_lowerCamelCase ))
UpperCAmelCase__ : Union[str, Any] = self.tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
encodings.append(_lowerCamelCase )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
UpperCAmelCase__ : Optional[Any] = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCAmelCase__ : Any = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
UpperCAmelCase__ : Any = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCAmelCase__ : List[str] = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
UpperCAmelCase__ : str = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
UpperCAmelCase__ : int = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
UpperCAmelCase__ : Any = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCAmelCase__ : Tuple = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
UpperCAmelCase__ : Dict = BatchEncoding()
UpperCAmelCase__ : int = input_ids
UpperCAmelCase__ : Optional[int] = attention_mask
if query_images is not None:
UpperCAmelCase__ : int = BatchEncoding()
UpperCAmelCase__ : Optional[int] = self.image_processor(
_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase ).pixel_values
UpperCAmelCase__ : List[Any] = query_pixel_values
if images is not None:
UpperCAmelCase__ : Any = self.image_processor(_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
if text is not None and images is not None:
UpperCAmelCase__ : List[str] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
UpperCAmelCase__ : int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCamelCase ) , tensor_type=_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.image_processor.post_process(*_lowerCamelCase , **_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.image_processor.post_process_object_detection(*_lowerCamelCase , **_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*_lowerCamelCase , **_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@property
def _a (self ):
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _lowerCamelCase , )
return self.image_processor_class
@property
def _a (self ):
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _lowerCamelCase , )
return self.image_processor
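# Standalone sketch of the query-padding rule in __call__ above: each
# sample's text-query list is right-padded with " " to the batch maximum
# before tokenization.
queries = [["cat"], ["dog", "remote control"]]
max_num_queries = max(len(q) for q in queries)
padded = [q + [" "] * (max_num_queries - len(q)) for q in queries]
assert padded == [["cat", " "], ["dog", "remote control"]]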
| 166 | 1 |
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
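# Hedged usage sketch (paths are placeholders):
#   python convert_pix2struct_original_pytorch_to_hf.py \
#       --t5x_checkpoint_path ./pix2struct_ckpt/checkpoint_1300000 \
#       --pytorch_dump_folder_path ./pix2struct-base
#
# The key renaming hinges on one regex rewrite (`re` is imported above); a
# standalone check of it:
assert re.sub(r"layers_(\d+)", r"layer.\1", "encoder.layers_11.mlp.wi") == "encoder.layer.11.mlp.wi"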
| 75 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, **lowerCAmelCase ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return super().__call__(lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ ={}
if "candidate_labels" in kwargs:
lowerCamelCase_ =kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
lowerCamelCase_ =kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase="This is a sound of {}." ):
"""simple docstring"""
if isinstance(lowerCAmelCase, lowerCAmelCase ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
lowerCamelCase_ =requests.get(lowerCAmelCase ).content
else:
with open(lowerCAmelCase, '''rb''' ) as f:
lowerCamelCase_ =f.read()
if isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =ffmpeg_read(lowerCAmelCase, self.feature_extractor.sampling_rate )
if not isinstance(lowerCAmelCase, np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
lowerCamelCase_ =self.feature_extractor(
[audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='''pt''' )
lowerCamelCase_ =candidate_labels
lowerCamelCase_ =[hypothesis_template.format(x) for x in candidate_labels]
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=self.framework, padding=lowerCAmelCase )
lowerCamelCase_ =[text_inputs]
return inputs
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =model_inputs.pop('''candidate_labels''' )
lowerCamelCase_ =model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0], lowerCAmelCase ):
lowerCamelCase_ =text_inputs[0]
else:
# Batching case.
lowerCamelCase_ =text_inputs[0][0]
lowerCamelCase_ =self.model(**lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ ={
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
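# Hedged usage sketch; the model id is an example CLAP-style checkpoint, not
# something this class mandates, and the call is commented out because it
# downloads weights:
# from transformers import pipeline
# classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# classifier("dog_bark.wav", candidate_labels=["a dog barking", "a vacuum cleaner"])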
| 75 | 1 |
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt()."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
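# Worked checks of the integer predicate (the sqrt-based variant can be
# bitten by floating point on large inputs, which is why the binary search
# exists):
assert perfect_square_binary_search(16)
assert not perfect_square_binary_search(17)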
| 371 |
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy approximation: repeatedly pick the vertex covering the most remaining edges."""
    queue: list[list] = []

    # for each node and its adjacency list, add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so -1 * len(value) is used to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
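# The heap stores [-degree, (vertex, adjacency)] because heapq is a min-heap:
# negating the degree makes the highest-degree vertex pop first. Sanity check
# that the result covers every edge (deepcopy because the function mutates
# the adjacency lists it is given):
import copy

sample = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
edges = [(u, v) for u in sample for v in sample[u]]
cover = greedy_min_vertex_cover(copy.deepcopy(sample))
assert all(u in cover or v in cover for u, v in edges)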
| 4 | 0 |
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
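# Hedged usage sketch (requires network access; the dataset id is
# illustrative):
# from huggingface_hub import HfApi
# fs = HfFileSystem(repo_info=HfApi().dataset_info("squad"))
# print(fs.ls(""))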
| 34 |
"""simple docstring"""
from functools import lru_cache
def snake_case__ ( __lowerCamelCase : int ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =2
lowerCamelCase__ : Optional[int] =set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__lowerCamelCase )
if n > 1:
factors.add(__lowerCamelCase )
return factors
@lru_cache
def snake_case__ ( __lowerCamelCase : int ):
"""simple docstring"""
return len(unique_prime_factors(__lowerCamelCase ) )
def snake_case__ ( __lowerCamelCase : list ):
"""simple docstring"""
return len(set(__lowerCamelCase ) ) in (0, 1)
def snake_case__ ( __lowerCamelCase : int ):
"""simple docstring"""
lowerCamelCase__ : Tuple =2
while True:
# Increment each value of a generated range
lowerCamelCase__ : Tuple =[base + i for i in range(__lowerCamelCase )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
lowerCamelCase__ : Optional[Any] =[upf_len(__lowerCamelCase ) for x in group]
checker.append(__lowerCamelCase )
# If all numbers in the list are equal, return the group variable.
if equality(__lowerCamelCase ):
return group
# Increment our base variable by 1
base += 1
def snake_case__ ( __lowerCamelCase : int = 4 ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =run(__lowerCamelCase )
return results[0] if len(__lowerCamelCase ) else None
if __name__ == "__main__":
print(solution())
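# Worked check of the search: 14 = 2*7 and 15 = 3*5 are the first pair of
# consecutive integers with two distinct prime factors each.
assert run(2) == [14, 15]
assert solution(2) == 14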
| 238 | 0 |
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 176 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __lowercase (UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = IFPipeline
_snake_case = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
_snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case = PipelineTesterMixin.required_optional_params - {"""latents"""}
def UpperCAmelCase ( self ) -> Any:
return self._get_dummy_components()
def UpperCAmelCase ( self , A , A=0 ) -> Optional[int]:
if str(A ).startswith("""mps""" ):
snake_case : List[str] = torch.manual_seed(A )
else:
snake_case : Optional[int] = torch.Generator(device=A ).manual_seed(A )
snake_case : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase ( self ) -> Any:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def UpperCAmelCase ( self ) -> List[str]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_float16(expected_max_diff=1e-1 )
def UpperCAmelCase ( self ) -> Dict:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def UpperCAmelCase ( self ) -> List[str]:
self._test_save_load_local()
def UpperCAmelCase ( self ) -> List[str]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> List[Any]:
# if
snake_case : Tuple = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.float16 )
snake_case : Tuple = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.float16 , text_encoder=None , tokenizer=None )
# pre-compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
snake_case , snake_case : Optional[int] = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
snake_case : List[str] = None
snake_case : List[Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(A , A , A , A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
snake_case : Any = IFImgaImgPipeline(**pipe_a.components )
snake_case : Dict = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(A , A , A , A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
snake_case : Optional[Any] = IFInpaintingPipeline(**pipe_a.components )
snake_case : Any = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(A , A , A , A )
def UpperCAmelCase ( self , A , A , A , A ) -> str:
# pipeline 1
_start_torch_memory_measurement()
snake_case : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : Tuple = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , num_inference_steps=2 , generator=A , output_type="""np""" , )
snake_case : Optional[int] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
snake_case : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
snake_case : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(A , A )
# pipeline 2
_start_torch_memory_measurement()
snake_case : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A )
snake_case : str = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , image=A , generator=A , num_inference_steps=2 , output_type="""np""" , )
snake_case : str = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
snake_case : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
snake_case : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A , A )
def UpperCAmelCase ( self , A , A , A , A ) -> int:
# pipeline 1
_start_torch_memory_measurement()
snake_case : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A )
snake_case : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : Union[str, Any] = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , image=A , num_inference_steps=2 , generator=A , output_type="""np""" , )
snake_case : Optional[int] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
snake_case : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
snake_case : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(A , A )
# pipeline 2
_start_torch_memory_measurement()
snake_case : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : int = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(A )
snake_case : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A )
snake_case : int = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , image=A , original_image=A , generator=A , num_inference_steps=2 , output_type="""np""" , )
snake_case : List[Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
snake_case : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
snake_case : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A , A )
def UpperCAmelCase ( self , A , A , A , A ) -> Any:
# pipeline 1
_start_torch_memory_measurement()
snake_case : List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A )
snake_case : Union[str, Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(A )
snake_case : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : Tuple = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , image=A , mask_image=A , num_inference_steps=2 , generator=A , output_type="""np""" , )
snake_case : Tuple = output.images[0]
assert image.shape == (6_4, 6_4, 3)
snake_case : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
snake_case : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(A , A )
# pipeline 2
_start_torch_memory_measurement()
snake_case : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : int = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A )
snake_case : Any = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(A )
snake_case : str = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(A )
snake_case : List[str] = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , image=A , mask_image=A , original_image=A , generator=A , num_inference_steps=2 , output_type="""np""" , )
snake_case : List[Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
snake_case : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
snake_case : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A , A )
def SCREAMING_SNAKE_CASE__ ( ) -> str:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
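The helper defined at the end of this record resets the CUDA allocator statistics so each pipeline stage's peak memory can be measured in isolation. A standalone version of the same pattern (assuming a CUDA device is available):

```python
import torch

def start_memory_measurement():
    # clear cached blocks and reset the peak counters, as the helper above does
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()

start_memory_measurement()
x = torch.randn(1024, 1024, device="cuda")  # stand-in for a pipeline call
peak_bytes = torch.cuda.max_memory_allocated()
assert peak_bytes < 13 * 10**9  # the same style of VRAM budget check the tests use
```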
| 176 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str=None ) -> Tuple:
'''simple docstring'''
if subparsers is not None:
A__ = subparsers.add_parser("test" )
else:
A__ = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=SCREAMING_SNAKE_CASE_ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
return parser
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> List[str]:
'''simple docstring'''
A__ = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
A__ = script_name
else:
A__ = F'--config_file={args.config_file} {script_name}'
A__ = ["accelerate-launch"] + test_args.split()
A__ = execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def lowerCAmelCase__ ( ) -> List[str]:
'''simple docstring'''
A__ = test_command_parser()
A__ = parser.parse_args()
test_command(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
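The test command above reduces to assembling an `accelerate-launch` argument list and executing it; the string assembly can be isolated and checked like this:

```python
def build_test_invocation(config_file=None, script_name="test_script.py"):
    # mirrors the logic above: forward --config_file to the launcher when given
    if config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={config_file} {script_name}"
    return ["accelerate-launch"] + test_args.split()

print(build_test_invocation())
# ['accelerate-launch', 'test_script.py']
print(build_test_invocation("my_config.yaml"))
# ['accelerate-launch', '--config_file=my_config.yaml', 'test_script.py']
```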
| 68 |
'''simple docstring'''
a_ = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of each unit's conversion factor, relative to the meter
a_ = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 1_2,
'Pm': 1_5,
'Em': 1_8,
'Zm': 2_1,
'Ym': 2_4,
}
def _a( UpperCamelCase__ : float, UpperCamelCase__ : str, UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =from_type.lower().strip('''s''' )
SCREAMING_SNAKE_CASE__ : Tuple =to_type.lower().strip('''s''' )
SCREAMING_SNAKE_CASE__ : List[Any] =UNIT_SYMBOL.get(UpperCamelCase__, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] =UNIT_SYMBOL.get(UpperCamelCase__, UpperCamelCase__ )
if from_sanitized not in METRIC_CONVERSION:
SCREAMING_SNAKE_CASE__ : Optional[int] =(
f"Invalid 'from_type' value: {from_type!r}.\n"
f"Conversion abbreviations are: {', '.join(UpperCamelCase__ )}"
)
raise ValueError(UpperCamelCase__ )
if to_sanitized not in METRIC_CONVERSION:
SCREAMING_SNAKE_CASE__ : List[str] =(
f"Invalid 'to_type' value: {to_type!r}.\n"
f"Conversion abbreviations are: {', '.join(UpperCamelCase__ )}"
)
raise ValueError(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =METRIC_CONVERSION[from_sanitized]
SCREAMING_SNAKE_CASE__ : Any =METRIC_CONVERSION[to_sanitized]
SCREAMING_SNAKE_CASE__ : Optional[int] =1
if from_exponent > to_exponent:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =from_exponent - to_exponent
else:
SCREAMING_SNAKE_CASE__ : Tuple =-(to_exponent - from_exponent)
return value * pow(1_0, UpperCamelCase__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 152 | 0 |
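Worked example of the exponent arithmetic in the metric-conversion record above: both branches of the `if from_exponent > to_exponent` check reduce to multiplying by 10 raised to (from_exponent - to_exponent).

```python
def convert(value, from_exp, to_exp):
    # km -> m uses exponents 3 and 0, so the factor is 10 ** (3 - 0)
    return value * 10 ** (from_exp - to_exp)

print(convert(4, 3, 0))    # 4 km  -> 4000 m
print(convert(500, 0, 3))  # 500 m -> 0.5 km
```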
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@slow
def a__ ( self :Tuple ):
snake_case_ : Tuple = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
snake_case_ : Optional[Any] = tf.convert_to_tensor(
[[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] ,dtype=tf.intaa ,) # "J'aime le camembert !"
snake_case_ : Dict = model(_UpperCamelCase )["""last_hidden_state"""]
snake_case_ : List[str] = tf.TensorShape((1, 1_0, 7_6_8) )
self.assertEqual(output.shape ,_UpperCamelCase )
# compare the actual values for a slice.
snake_case_ : Any = tf.convert_to_tensor(
[[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] ,dtype=tf.floataa ,)
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
| 8 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str=True , lowerCamelCase_ :str="pt" ):
'''simple docstring'''
snake_case_ : Tuple = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(""" """ ) else {}
snake_case_ : Union[str, Any] = padding_side
return tokenizer(
[line] , max_length=lowerCamelCase_ , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , )
def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Any=None , ):
'''simple docstring'''
snake_case_ : Dict = input_ids.ne(lowerCamelCase_ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __UpperCamelCase ( lowercase__ ):
def __init__( self :List[Any] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Any ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Any="train" ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :int=None ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :Optional[int]="" ,):
super().__init__()
snake_case_ : List[str] = Path(_UpperCamelCase ).joinpath(type_path + """.source""" )
snake_case_ : int = Path(_UpperCamelCase ).joinpath(type_path + """.target""" )
snake_case_ : Optional[int] = self.get_char_lens(self.src_file )
snake_case_ : List[str] = max_source_length
snake_case_ : str = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
snake_case_ : str = tokenizer
snake_case_ : str = prefix
if n_obs is not None:
snake_case_ : int = self.src_lens[:n_obs]
snake_case_ : Tuple = src_lang
snake_case_ : str = tgt_lang
def __len__( self :Any ):
return len(self.src_lens )
def __getitem__( self :List[str] ,_UpperCamelCase :Union[str, Any] ):
snake_case_ : Optional[int] = index + 1 # linecache starts at 1
snake_case_ : Dict = self.prefix + linecache.getline(str(self.src_file ) ,_UpperCamelCase ).rstrip("""\n""" )
snake_case_ : List[Any] = linecache.getline(str(self.tgt_file ) ,_UpperCamelCase ).rstrip("""\n""" )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_UpperCamelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
snake_case_ : int = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer
)
snake_case_ : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer
snake_case_ : Optional[Any] = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_source_length ,"""right""" )
snake_case_ : Tuple = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_target_length ,"""right""" )
snake_case_ : int = source_inputs["""input_ids"""].squeeze()
snake_case_ : str = target_inputs["""input_ids"""].squeeze()
snake_case_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def a__ ( _UpperCamelCase :str ):
return [len(_UpperCamelCase ) for x in Path(_UpperCamelCase ).open().readlines()]
def a__ ( self :Optional[int] ,_UpperCamelCase :List[str] ):
snake_case_ : Optional[Any] = torch.stack([x["""input_ids"""] for x in batch] )
snake_case_ : List[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
snake_case_ : Union[str, Any] = torch.stack([x["""decoder_input_ids"""] for x in batch] )
snake_case_ : Optional[Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_UpperCamelCase )
else self.tokenizer.pad_token_id
)
snake_case_ : Tuple = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_UpperCamelCase )
else self.tokenizer.pad_token_id
)
snake_case_ : Optional[int] = trim_batch(_UpperCamelCase ,_UpperCamelCase )
snake_case_ , snake_case_ : Dict = trim_batch(_UpperCamelCase ,_UpperCamelCase ,attention_mask=_UpperCamelCase )
snake_case_ : Optional[int] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__A : List[Any] = getLogger(__name__)
def UpperCAmelCase ( lowerCamelCase_ :List[List] ):
'''simple docstring'''
return list(itertools.chain.from_iterable(lowerCamelCase_ ) )
def UpperCAmelCase ( lowerCamelCase_ :str ):
'''simple docstring'''
snake_case_ : int = get_git_info()
save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , """git_log.json""" ) )
def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int]=4 , **lowerCamelCase_ :Union[str, Any] ):
'''simple docstring'''
with open(lowerCamelCase_ , """w""" ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :List[Any] ):
'''simple docstring'''
with open(lowerCamelCase_ ) as f:
return json.load(lowerCamelCase_ )
def UpperCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = git.Repo(search_parent_directories=lowerCamelCase_ )
snake_case_ : List[str] = {
"""repo_id""": str(lowerCamelCase_ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def UpperCAmelCase ( lowerCamelCase_ :Callable , lowerCamelCase_ :Iterable ):
'''simple docstring'''
return list(map(lowerCamelCase_ , lowerCamelCase_ ) )
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int ):
'''simple docstring'''
with open(lowerCamelCase_ , """wb""" ) as f:
return pickle.dump(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :Dict ):
'''simple docstring'''
def remove_articles(lowerCamelCase_ :str ):
return re.sub(R"""\b(a|an|the)\b""" , """ """ , lowerCamelCase_ )
def white_space_fix(lowerCamelCase_ :Optional[Any] ):
return " ".join(text.split() )
def remove_punc(lowerCamelCase_ :Tuple ):
snake_case_ : Union[str, Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCamelCase_ :Optional[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) )
def UpperCAmelCase ( lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[int] ):
'''simple docstring'''
snake_case_ : List[Any] = normalize_answer(lowerCamelCase_ ).split()
snake_case_ : Optional[int] = normalize_answer(lowerCamelCase_ ).split()
snake_case_ : List[Any] = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ )
snake_case_ : Optional[Any] = sum(common.values() )
if num_same == 0:
return 0
snake_case_ : Optional[Any] = 1.0 * num_same / len(lowerCamelCase_ )
snake_case_ : Union[str, Any] = 1.0 * num_same / len(lowerCamelCase_ )
snake_case_ : Optional[Any] = (2 * precision * recall) / (precision + recall)
return fa
def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] ):
'''simple docstring'''
return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] ):
'''simple docstring'''
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
snake_case_ : Optional[int] = 0
for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ):
em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
em /= len(lowerCamelCase_ )
return {"em": em}
def UpperCAmelCase ( lowerCamelCase_ :Any ):
'''simple docstring'''
return model_prefix.startswith("""rag""" )
def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Union[str, Any] ):
'''simple docstring'''
snake_case_ : List[str] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
snake_case_ : Optional[int] = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ):
logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase_ ) )
delattr(lowerCamelCase_ , lowerCamelCase_ )
continue
snake_case_ : str = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p]
setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) )
delattr(lowerCamelCase_ , lowerCamelCase_ )
return hparams, config | 8 | 1 |
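Worked example of the token-level F1 defined in the record above, with a simplified normalization (lowercasing and whitespace splitting only, rather than the full article/punctuation stripping):

```python
from collections import Counter

def f1(pred, gold):
    pred_toks, gold_toks = pred.lower().split(), gold.lower().split()
    # multiset overlap of prediction and gold tokens
    num_same = sum((Counter(pred_toks) & Counter(gold_toks)).values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)

print(f1("the cat sat", "cat sat down"))  # two shared tokens -> 0.666...
```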
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[int] = logging.get_logger(__name__)
_a : List[Any] = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = "ibert"
def __init__( self , a__=30522 , a__=768 , a__=12 , a__=12 , a__=3072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=2 , a__=0.0_2 , a__=1e-12 , a__=1 , a__=0 , a__=2 , a__="absolute" , a__=False , a__="none" , **a__ , ):
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
_lowerCAmelCase : List[str] = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : List[Any] = intermediate_size
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Dict = type_vocab_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Optional[int] = position_embedding_type
_lowerCAmelCase : Any = quant_mode
_lowerCAmelCase : Union[str, Any] = force_dequant
class __A ( SCREAMING_SNAKE_CASE_ ):
@property
def __A ( self ):
if self.task == "multiple-choice":
_lowerCAmelCase : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_lowerCAmelCase : Union[str, Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
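A minimal usage sketch for the config above; `IBertConfig` is the public name this class ships under in `transformers`:

```python
from transformers import IBertConfig

# quant_mode switches the model to integer-only arithmetic; force_dequant can
# selectively disable quantization for chosen ops ("none" keeps it everywhere)
config = IBertConfig(quant_mode=True, force_dequant="none")
print(config.quant_mode, config.num_hidden_layers)  # True 12
```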
| 44 |
"""simple docstring"""
from __future__ import annotations
_a : List[str] = 10
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ) -> list[int]:
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Union[str, Any] = max(_lowerCamelCase )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase : list[list] = [[] for _ in range(_lowerCamelCase )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase : Tuple = int((i / placement) % RADIX )
buckets[tmp].append(_lowerCamelCase )
# put each bucket's contents back into list_of_ints
_lowerCAmelCase : List[str] = 0
for b in range(_lowerCamelCase ):
for i in buckets[b]:
_lowerCAmelCase : Any = i
a += 1
# move to the next digit place
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
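The same least-significant-digit radix sort in compact form, using integer arithmetic for the digit extraction (equivalent to `int((i / placement) % RADIX)` above but without the float round-trip), plus a worked run:

```python
RADIX = 10

def radix_sort(xs):
    placement = 1
    while placement <= max(xs):
        buckets = [[] for _ in range(RADIX)]
        for x in xs:
            buckets[(x // placement) % RADIX].append(x)  # current digit
        xs = [x for bucket in buckets for x in bucket]
        placement *= RADIX  # move to the next digit place
    return xs

# pass 1 groups by ones digit, pass 2 by tens, pass 3 by hundreds
print(radix_sort([170, 45, 75, 90, 2]))  # [2, 45, 75, 90, 170]
```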
| 44 | 1 |
from __future__ import annotations
class __lowercase :
'''simple docstring'''
def __init__( self : List[Any] , _a : int ):
UpperCamelCase__ = data
UpperCamelCase__ = None
UpperCamelCase__ = None
def lowerCamelCase_ ( UpperCamelCase__ : Node | None ): # in-order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def lowerCamelCase_ ( UpperCamelCase__ : Node | None ):
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ), depth_of_tree(tree.right ) ) if tree else 0
def lowerCamelCase_ ( UpperCamelCase__ : Node ):
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def lowerCamelCase_ ( ): # Main function for testing.
'''simple docstring'''
UpperCamelCase__ = Node(1 )
UpperCamelCase__ = Node(2 )
UpperCamelCase__ = Node(3 )
UpperCamelCase__ = Node(4 )
UpperCamelCase__ = Node(5 )
UpperCamelCase__ = Node(6 )
UpperCamelCase__ = Node(7 )
UpperCamelCase__ = Node(8 )
UpperCamelCase__ = Node(9 )
print(is_full_binary_tree(UpperCamelCase__ ) )
print(depth_of_tree(UpperCamelCase__ ) )
print('''Tree is: ''' )
display(UpperCamelCase__ )
if __name__ == "__main__":
main()
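The full-binary-tree predicate above, restated compactly: a tree is full exactly when no node has a single child.

```python
class Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def is_full(tree):
    if tree is None:
        return True
    if (tree.left is None) != (tree.right is None):
        return False  # exactly one child -> not a full binary tree
    return is_full(tree.left) and is_full(tree.right)

print(is_full(Node(1, Node(2), Node(3))))           # True
print(is_full(Node(1, Node(2, Node(4)), Node(3))))  # False: node 2 has one child
```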
| 355 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowercase = logging.get_logger(__name__)
class __lowercase ( A ):
'''simple docstring'''
def __init__( self : List[str] , *_a : Any , **_a : str ):
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
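The record above is the standard deprecation shim: subclass the replacement class and warn on construction. In isolation (the warning category is obfuscated in the record; `FutureWarning` is the conventional choice):

```python
import warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # warns once, then behaves exactly like the new class
```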
| 35 | 0 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
),
}
class a__ ( A__ ):
A = 'xlm-prophetnet'
A = ['past_key_values']
A = {
'num_attention_heads': 'num_encoder_attention_heads',
}
def __init__( self : Optional[Any],_A : Optional[float] = 0.1,_A : Optional[Union[str, Callable]] = "gelu",_A : Optional[int] = 3_0522,_A : Optional[int] = 1024,_A : Optional[int] = 4096,_A : Optional[int] = 12,_A : Optional[int] = 16,_A : Optional[int] = 4096,_A : Optional[int] = 12,_A : Optional[int] = 16,_A : Optional[float] = 0.1,_A : Optional[float] = 0.1,_A : Optional[int] = 512,_A : Optional[float] = 0.02,_A : Optional[bool] = True,_A : Optional[bool] = True,_A : Optional[int] = 0,_A : Optional[int] = 2,_A : Optional[int] = 32,_A : Optional[int] = 128,_A : Optional[bool] = False,_A : Optional[float] = 0.0,_A : Optional[bool] = True,_A : Optional[int] = 0,_A : Optional[int] = 1,_A : Optional[int] = 2,**_A : str,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = vocab_size
SCREAMING_SNAKE_CASE_ : Tuple = hidden_size
SCREAMING_SNAKE_CASE_ : str = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ : Any = num_encoder_layers
SCREAMING_SNAKE_CASE_ : Any = num_encoder_attention_heads
SCREAMING_SNAKE_CASE_ : int = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ : Optional[int] = num_decoder_layers
SCREAMING_SNAKE_CASE_ : str = num_decoder_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Optional[int] = init_std # Normal(0, this parameter)
SCREAMING_SNAKE_CASE_ : Dict = activation_function
# parameters for xlmprophetnet
SCREAMING_SNAKE_CASE_ : int = ngram
SCREAMING_SNAKE_CASE_ : Optional[int] = num_buckets
SCREAMING_SNAKE_CASE_ : Optional[int] = relative_max_distance
SCREAMING_SNAKE_CASE_ : List[Any] = disable_ngram_loss
SCREAMING_SNAKE_CASE_ : List[str] = eps
# 3 Types of Dropout
SCREAMING_SNAKE_CASE_ : str = attention_dropout
SCREAMING_SNAKE_CASE_ : Tuple = activation_dropout
SCREAMING_SNAKE_CASE_ : List[Any] = dropout
SCREAMING_SNAKE_CASE_ : Any = use_cache
super().__init__(
pad_token_id=_A,bos_token_id=_A,eos_token_id=_A,is_encoder_decoder=_A,add_cross_attention=_A,decoder_start_token_id=_A,**_A,)
@property
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def __UpperCamelCase ( self : Optional[int],_A : str ):
"""simple docstring"""
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
" `num_decoder_layers`." )
| 18 |
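The `num_hidden_layers` property in the XLM-ProphetNet config record above is a read-only derived value (its setter deliberately raises). A stripped-down version of that pattern:

```python
class EncoderDecoderConfig:
    def __init__(self, num_encoder_layers=12, num_decoder_layers=12):
        self.num_encoder_layers = num_encoder_layers
        self.num_decoder_layers = num_decoder_layers

    @property
    def num_hidden_layers(self):
        # derived from the two depths; assigning to it should raise instead
        return self.num_encoder_layers + self.num_decoder_layers

print(EncoderDecoderConfig().num_hidden_layers)  # 24
```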
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A, '''tf_padding''' ) )
self.parent.assertTrue(hasattr(__A, '''depth_multiplier''' ) )
class __UpperCAmelCase :
def __init__( self : int, __A : List[Any], __A : str=1_3, __A : Dict=3, __A : int=3_2, __A : int=0.2_5, __A : List[str]=8, __A : int=8, __A : Dict=6, __A : str=3_2, __A : Any=True, __A : str=True, __A : int=True, __A : Union[str, Any]="relu6", __A : Any=1_2_8_0, __A : List[Any]=0.1, __A : Optional[Any]=0.0_2, __A : Tuple=True, __A : List[Any]=True, __A : str=1_0, __A : Optional[Any]=None, ):
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : List[str] = num_channels
UpperCAmelCase : str = image_size
UpperCAmelCase : Optional[int] = depth_multiplier
UpperCAmelCase : Union[str, Any] = depth_divisible_by
UpperCAmelCase : Optional[Any] = min_depth
UpperCAmelCase : List[str] = expand_ratio
UpperCAmelCase : Dict = tf_padding
UpperCAmelCase : str = output_stride
UpperCAmelCase : Union[str, Any] = first_layer_is_expansion
UpperCAmelCase : List[Any] = finegrained_output
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
UpperCAmelCase : Optional[Any] = classifier_dropout_prob
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : List[str] = is_training
UpperCAmelCase : Tuple = num_labels
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : Any = scope
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Dict = None
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size], self.num_labels )
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ ( self : Any ):
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def __magic_name__ ( self : List[Any], __A : Dict, __A : Optional[Any], __A : Optional[int], __A : Union[str, Any] ):
UpperCAmelCase : Any = MobileNetVaModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[Any] = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
self.parent.assertEqual(
result.pooler_output.shape, (self.batch_size, self.last_hidden_size), )
def __magic_name__ ( self : str, __A : Union[str, Any], __A : Dict, __A : Optional[Any], __A : str ):
UpperCAmelCase : Optional[int] = self.num_labels
UpperCAmelCase : Any = MobileNetVaForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[Any], __A : Optional[Any], __A : List[str], __A : Dict, __A : Dict ):
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : Dict = MobileNetVaForSemanticSegmentation(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
UpperCAmelCase : Optional[Any] = model(__A, labels=__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs
UpperCAmelCase : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = MobileNetVaModelTester(self )
UpperCAmelCase : List[Any] = MobileNetVaConfigTester(self, config_class=__A, has_text_modality=__A )
def __magic_name__ ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
def __magic_name__ ( self : Tuple ):
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
def __magic_name__ ( self : Any ):
pass
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(__A )
UpperCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : int ):
def check_hidden_states_output(__A : Any, __A : Optional[Any], __A : str ):
UpperCAmelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Optional[Any] = outputs.hidden_states
UpperCAmelCase : List[Any] = 1_6
self.assertEqual(len(__A ), __A )
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : int ):
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
@slow
def __magic_name__ ( self : Dict ):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[Any] = MobileNetVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> int:
UpperCAmelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[Any] ):
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : List[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(__A )
UpperCAmelCase : Optional[int] = self.default_image_processor
UpperCAmelCase : Optional[Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : str = model(**__A )
# verify the logits
UpperCAmelCase : int = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = model.to(__A )
UpperCAmelCase : Tuple = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : int = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(**__A )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify the logits
UpperCAmelCase : Tuple = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor(
[
[[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
[[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
[[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
], device=__A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], __A, atol=1E-4 ) )
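The forward-signature check used by the test class above, in isolation: `inspect.signature` preserves declaration order, so a test can pin the first positional argument of `forward`.

```python
import inspect

def forward(pixel_values, labels=None):
    return pixel_values

arg_names = list(inspect.signature(forward).parameters)
assert arg_names[:1] == ["pixel_values"]
print(arg_names)  # ['pixel_values', 'labels']
```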
| 336 | 0 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase__ ( unittest.TestCase ):
@property
def __A ( self : List[Any] ) -> Optional[Any]:
torch.manual_seed(0 )
__lowerCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def __A ( self : Optional[int] ) -> Optional[Any]:
__lowerCamelCase = self.dummy_uncond_unet
__lowerCamelCase = ScoreSdeVeScheduler()
__lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
sde_ve.to(SCREAMING_SNAKE_CASE__ )
sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )[
0
]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Tuple ) -> str:
__lowerCamelCase = '''google/ncsnpp-church-256'''
__lowerCamelCase = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = ScoreSdeVeScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
sde_ve.to(SCREAMING_SNAKE_CASE__ )
sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
__lowerCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
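The reproducibility pattern those tests rely on, in miniature: re-seeding the global generator before each call makes stochastic sampling repeat exactly.

```python
import torch

def sample():
    return torch.randn(2, 3)  # stand-in for a diffusion sampling call

torch.manual_seed(0)
a = sample()
torch.manual_seed(0)
b = sample()
print(torch.equal(a, b))  # True: same seed, identical tensors
```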
| 339 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
SCREAMING_SNAKE_CASE__ : int = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
a__ : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a__ : Optional[str] = field(
default=__lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a__ : Optional[str] = field(
default=__lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a__ : Optional[str] = field(
default=__lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether tp freeze the encoder."""} )
a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether to freeze the embeddings."""} )
@dataclass
class lowerCAmelCase__ :
a__ : str = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
a__ : Optional[str] = field(
default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , )
a__ : Optional[int] = field(
default=1_024 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a__ : Optional[int] = field(
default=128 , metadata={
"""help""": (
"""The maximum total sequence length for target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a__ : Optional[int] = field(
default=142 , metadata={
"""help""": (
"""The maximum total sequence length for validation target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded. """
"""This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """
"""during ``evaluate`` and ``predict``."""
)
} , )
a__ : Optional[int] = field(
default=142 , metadata={
"""help""": (
"""The maximum total sequence length for test target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} )
a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} )
a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} )
a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Source language id for translation."""} )
a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Target language id for translation."""} )
a__ : Optional[int] = field(default=__lowercase , metadata={"""help""": """# num_beams to use for evaluation."""} )
a__ : bool = field(
default=__lowercase , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , )
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int ) -> Dict:
logger.info(f'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(f''' {key} = {metrics[key]}''' )
save_json(__lowerCAmelCase , os.path.join(__lowerCAmelCase , f'''{split}_results.json''' ) )
def __magic_name__ ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_args_into_dataclasses()
check_output_dir(__lowerCAmelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowerCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
assert hasattr(__lowerCAmelCase , __lowerCAmelCase ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(__lowerCAmelCase , __lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
__lowerCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(__lowerCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
__lowerCamelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(__lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__lowerCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
__lowerCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(__lowerCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
__lowerCamelCase = SeqaSeqDataset
# Get datasets
__lowerCamelCase = (
dataset_class(
__lowerCAmelCase , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
__lowerCamelCase = (
dataset_class(
__lowerCAmelCase , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
__lowerCamelCase = (
dataset_class(
__lowerCAmelCase , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
__lowerCamelCase = (
build_compute_metrics_fn(data_args.task , __lowerCAmelCase ) if training_args.predict_with_generate else None
)
__lowerCamelCase = SeqaSeqTrainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , data_args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , data_collator=SeqaSeqDataCollator(
__lowerCAmelCase , __lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , )
__lowerCamelCase = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
__lowerCamelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
__lowerCamelCase = train_result.metrics
__lowerCamelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , __lowerCAmelCase , training_args.output_dir )
all_metrics.update(__lowerCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowerCamelCase = trainer.evaluate(metric_key_prefix='''val''' )
__lowerCamelCase = data_args.n_val
__lowerCamelCase = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , __lowerCAmelCase , training_args.output_dir )
all_metrics.update(__lowerCAmelCase )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
__lowerCamelCase = trainer.predict(test_dataset=__lowerCAmelCase , metric_key_prefix='''test''' )
__lowerCamelCase = test_output.metrics
__lowerCamelCase = data_args.n_test
if trainer.is_world_process_zero():
__lowerCamelCase = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , __lowerCAmelCase , training_args.output_dir )
all_metrics.update(__lowerCAmelCase )
if training_args.predict_with_generate:
__lowerCamelCase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
__lowerCamelCase = lmap(str.strip , __lowerCAmelCase )
write_txt_file(__lowerCAmelCase , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(__lowerCAmelCase , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
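The `handle_metrics`/`save_json` pair in the script above reduces to logging sorted keys and dumping JSON; a standalone equivalent:

```python
import json
import os

def handle_metrics(split, metrics, output_dir):
    for key in sorted(metrics):
        print(f"  {key} = {metrics[key]}")
    with open(os.path.join(output_dir, f"{split}_results.json"), "w") as f:
        json.dump(metrics, f, indent=4)

handle_metrics("val", {"val_loss": 0.1234, "val_n_objs": 100}, ".")
# prints the two metrics and writes ./val_results.json
```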
| 339 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ) -> bool:
return str(_UpperCAmelCase ) == str(_UpperCAmelCase )[::-1]
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[str] ) -> int:
return int(_UpperCAmelCase ) + int(str(_UpperCAmelCase )[::-1] )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Any = 10000 ) -> int:
_a : List[str] =[]
for num in range(1 ,_UpperCAmelCase ):
_a : Any =0
_a : Tuple =num
while iterations < 50:
_a : Dict =sum_reverse(_UpperCAmelCase )
iterations += 1
if is_palindrome(_UpperCAmelCase ):
break
else:
lychrel_nums.append(_UpperCAmelCase )
return len(lychrel_nums )
if __name__ == "__main__":
print(F"{solution() = }")
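Worked trace of the reverse-and-add iteration above: 349 becomes a palindrome in three steps (349 -> 1292 -> 4213 -> 7337), so it is not counted as a Lychrel number.

```python
def sum_reverse(n):
    return n + int(str(n)[::-1])

n = 349
for i in range(1, 51):
    n = sum_reverse(n)
    if str(n) == str(n)[::-1]:
        print(f"palindrome {n} after {i} iterations")  # palindrome 7337 after 3 iterations
        break
```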
| 276 |
"""simple docstring"""
import sys
from collections import defaultdict
class UpperCamelCase :
def __init__( self) -> Optional[int]:
snake_case_ = []
def a_ ( self, lowerCAmelCase__) -> Any:
return self.node_position[vertex]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = pos
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> str:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
snake_case_ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
snake_case_ = 2 * start + 1
else:
snake_case_ = 2 * start + 2
if heap[smallest_child] < heap[start]:
snake_case_ , snake_case_ = heap[smallest_child], positions[smallest_child]
snake_case_ , snake_case_ = (
heap[start],
positions[start],
)
snake_case_ , snake_case_ = temp, tempa
snake_case_ = self.get_position(positions[smallest_child])
self.set_position(
positions[smallest_child], self.get_position(positions[start]))
self.set_position(positions[start], lowerCAmelCase__)
self.top_to_bottom(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> List[str]:
snake_case_ = position[index]
while index != 0:
snake_case_ = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
if val < heap[parent]:
snake_case_ = heap[parent]
snake_case_ = position[parent]
self.set_position(position[parent], lowerCAmelCase__)
else:
snake_case_ = val
snake_case_ = temp
self.set_position(lowerCAmelCase__, lowerCAmelCase__)
break
snake_case_ = parent
else:
snake_case_ = val
snake_case_ = temp
self.set_position(lowerCAmelCase__, 0)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = len(lowerCAmelCase__) // 2 - 1
for i in range(lowerCAmelCase__, -1, -1):
self.top_to_bottom(lowerCAmelCase__, lowerCAmelCase__, len(lowerCAmelCase__), lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Union[str, Any]:
snake_case_ = positions[0]
snake_case_ = sys.maxsize
self.top_to_bottom(lowerCAmelCase__, 0, len(lowerCAmelCase__), lowerCAmelCase__)
return temp
def UpperCAmelCase ( UpperCAmelCase ) -> Tuple:
snake_case_ = Heap()
snake_case_ = [0] * len(UpperCAmelCase )
snake_case_ = [-1] * len(UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum distance from each explored vertex to a neighboring vertex of the
# partial tree formed so far in the graph
snake_case_ = [] # Heap of Distance of vertices from their neighboring vertex
snake_case_ = []
for vertex in range(len(UpperCAmelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCAmelCase )
heap.node_position.append(UpperCAmelCase )
snake_case_ = []
snake_case_ = 1
snake_case_ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
snake_case_ = 0
snake_case_ = distance
heap.heapify(UpperCAmelCase , UpperCAmelCase )
for _ in range(1 , len(UpperCAmelCase ) ):
snake_case_ = heap.delete_minimum(UpperCAmelCase , UpperCAmelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
snake_case_ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCAmelCase )]
):
snake_case_ = distance
heap.bottom_to_top(
UpperCAmelCase , heap.get_position(UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase )
snake_case_ = vertex
return tree_edges
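# Quick illustration (my own example, not in the original): for the weighted
# triangle {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 1]], 2: [[0, 3], [1, 1]]}
# prisms_algorithm returns [(0, 1), (1, 2)], the two cheapest edges.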
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 69 | 0 |
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    '''simple docstring'''
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
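# Worked values (my own illustration): malus_law(100.0, 60) == 25.0 because
# cos(60 degrees) squared is 0.25, while malus_law(100.0, 0) returns the full
# incident intensity of 100.0.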
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 205 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
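# Minimal usage sketch (hedged: assumes the "facebook/bart-base" checkpoint
# is reachable on the Hub):
#
#   tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   batch = tokenizer(["Hello world"])
#
# `batch` then holds `input_ids` and `attention_mask`, matching the
# `model_input_names` declared above.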
| 205 | 1 |
"""simple docstring"""
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """simple docstring"""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    """simple docstring"""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
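# Background note (my own addition): by Euler's theorem, a connected graph
# has an Euler circuit exactly when every vertex has even degree, and an
# Euler path when exactly two vertices have odd degree; the 1/2/3 return
# codes above encode circuit, path, and neither, respectively.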
def check_euler(graph, max_node):
    """simple docstring"""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    """simple docstring"""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
| 64 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
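# Expected behaviour (illustration, not from the original file):
# generate_all_subsequences([1, 2]) prints [], [2], [1], [1, 2] in that
# order, because each recursion level first explores the branch that skips
# the current element.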
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
| 50 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")

        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
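# Minimal usage sketch (my own example, not from the original file):
#
#   config = DeformableDetrConfig(num_queries=100)
#   assert config.hidden_size == config.d_model == 256
#
# The `attribute_map` above is what routes `hidden_size` to `d_model`.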
| 371 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
"""simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_bart_tokenizer(self) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
    def tearDown(self):
shutil.rmtree(self.tmpdirname )
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
_A = 1
_A = self.get_dummy_canonical_hf_index_retriever()
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A ,_A ,_A = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __A )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
_A = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_A = self.get_dummy_dataset()
retriever.save_pretrained(__A )
_A = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve(self):
_A = 1
_A = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A ,_A ,_A = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __A )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
_A = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
_A = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
_A = 1
_A = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A ,_A ,_A = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __A )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
_A = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
_A = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
    def test_legacy_index_retriever_retrieve(self):
_A = 1
_A = self.get_dummy_legacy_index_retriever()
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A ,_A ,_A = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __A )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
_A = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
_A = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
import torch
_A = 1
_A = self.get_dummy_canonical_hf_index_retriever()
_A = [[5, 7], [10, 11]]
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
_A ,_A ,_A = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , np.ndarray )
_A = retriever(
__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A , return_tensors='''pt''' , )
_A ,_A ,_A ,_A = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end_to_end_retrieve(self):
_A = self.get_dpr_ctx_encoder_tokenizer()
_A = 1
_A = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
retriever.set_ctx_encoder_tokenizer(__A )
_A = [[5, 7], [10, 11]]
_A = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
self.assertEqual(
len(__A ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __A ) # check for doc token related keys in dictionary.
| 75 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
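# Usage note (my own addition): this module backs the `diffusers-cli`
# console script, so running `diffusers-cli env` prints environment
# information suitable for pasting into bug reports.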
if __name__ == "__main__":
main()
| 98 | """simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
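# Note on the two step functions exercised above (my own summary): step_prk
# runs the Runge-Kutta warm-up phase of the PNDM sampler, while step_plms
# runs the cheaper linear multistep updates that reuse the `ets` residual
# history populated during the warm-up.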
| 98 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
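# Minimal usage sketch (hedged: requires a real sentencepiece model file on
# disk; the path below is a placeholder):
#
#   tok = XLMRobertaTokenizer("sentencepiece.bpe.model")
#   ids = tok("Hello world")["input_ids"]
#
# `ids` starts with bos_token_id 0 and ends with eos_token_id 2, as built by
# build_inputs_with_special_tokens above.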
| 352 |
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
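# Worked example (my own illustration): for f(x) = x**3 - 2*x - 5,
# f(2) = -1 and f(3) = 16 bracket a sign change, so bisection(f, 2, 3)
# converges to the root near 2.0945515.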
def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
| 37 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
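# Shape note (my own addition): the dummy video tensor above is laid out as
# (batch, frames, channels, height, width), so the slow test feeds ten
# 1024x576 RGB frames through the pipeline.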
| 139 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
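# Sanity walk-through (illustration, not from the original file): with the
# tiny merge table above, "react" tokenizes to re@@ a@@ c@@ t because only
# the "r e" merge applies, while "readapt" splits as re@@ adapt once the
# "ad apt</w>" merge fires.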
| 139 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def a_ ( lowerCamelCase : Dict , lowerCamelCase : Union[str, Any] ):
lowerCAmelCase = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
lowerCAmelCase = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' )
lowerCAmelCase = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' )
lowerCAmelCase = key.replace('heads.cmd.itm_head.cls' , 'itm_head' )
lowerCAmelCase = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' )
lowerCAmelCase = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' )
lowerCAmelCase = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' )
lowerCAmelCase = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' )
lowerCAmelCase = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' )
lowerCAmelCase = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' )
lowerCAmelCase = key.replace('image_encoder.module' , 'flava.image_model' )
lowerCAmelCase = key.replace('text_encoder.module' , 'flava.text_model' )
lowerCAmelCase = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' )
lowerCAmelCase = key.replace('mm_encoder.module' , 'flava.multimodal_model' )
lowerCAmelCase = key.replace('text_projection' , 'flava.text_projection' )
lowerCAmelCase = key.replace('image_projection' , 'flava.image_projection' )
lowerCAmelCase = value.float()
for key, value in codebook_state_dict.items():
lowerCAmelCase = value
return upgrade
@torch.no_grad()
def a_ ( lowerCamelCase : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : str , lowerCamelCase : Optional[Any]=None ):
if config_path is not None:
lowerCAmelCase = FlavaConfig.from_pretrained(lowerCamelCase )
else:
lowerCAmelCase = FlavaConfig()
lowerCAmelCase = FlavaForPreTraining(lowerCamelCase ).eval()
lowerCAmelCase = convert_dalle_checkpoint(lowerCamelCase , lowerCamelCase , save_checkpoint=lowerCamelCase )
if os.path.exists(lowerCamelCase ):
lowerCAmelCase = torch.load(lowerCamelCase , map_location='cpu' )
else:
lowerCAmelCase = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location='cpu' )
lowerCAmelCase = upgrade_state_dict(lowerCamelCase , lowerCamelCase )
hf_model.load_state_dict(lowerCamelCase )
lowerCAmelCase = hf_model.state_dict()
lowerCAmelCase = count_parameters(lowerCamelCase )
lowerCAmelCase = count_parameters(lowerCamelCase ) + count_parameters(lowerCamelCase )
assert torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-3 )
hf_model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
__snake_case =parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
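# Hedged sketch of the checksum idea behind the assert above: summing all
# parameters gives a cheap, order-independent fingerprint, so a pure key
# rename must preserve it (embeddings are skipped because FLAVA duplicates
# them). Names below are illustrative, not from the original script.
import torch


def param_checksum(state_dict, skip="encoder.embeddings"):
    return sum(p.float().sum() for k, p in state_dict.items() if skip not in k)


sd = {"a.weight": torch.ones(2, 2), "b.bias": torch.arange(3.0)}
renamed = {k.replace("a.", "x."): v for k, v in sd.items()}
assert torch.allclose(param_checksum(sd), param_checksum(renamed))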
| 55 |
'''simple docstring'''
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1)

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2")

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1)

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe("anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
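# Minimal sketch of the peak-VRAM measurement pattern the test above relies
# on (assumes a CUDA device; the workload line is a placeholder):
import torch

torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
# ... run the offloaded pipeline here ...
peak_bytes = torch.cuda.max_memory_allocated()
print(f"peak allocation: {peak_bytes / 2**30:.2f} GiB")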
| 55 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
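# Hedged usage sketch (assumes the transformers package is installed):
# CamembertConfig mirrors RoBERTa's hyperparameters, and any default above
# can be overridden per checkpoint at construction time.
from transformers import CamembertConfig

config = CamembertConfig(vocab_size=32005, num_hidden_layers=6)
assert config.num_attention_heads == 12 and config.num_hidden_layers == 6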
| 174 |
'''simple docstring'''
def method_1(boundary, steps):
    # "extended trapezoidal rule":
    # int(f) ~ h/2 * (f(x0) + 2*f(x1) + ... + 2*f(x_{n-1}) + f(xn))
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    # note: the `b - h` bound relies on floating-point accumulation to reach
    # the last interior point; an integer-count loop would be more robust
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
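# Worked check of the rule above: for f(x) = x^2 on [0, 1] with h = 0.1,
# y = h/2 * (f(0) + f(1)) + h * (f(0.1) + ... + f(0.9))
#   = 0.05 * 1 + 0.1 * 2.85 = 0.335,
# versus the exact integral 1/3; the discrepancy matches the O(h^2) error
# bound h^2 * (b - a) * max|f''| / 12 = 0.01 * 2 / 12 ~ 0.00167.
h = 0.1
interior = [h * i for i in range(1, 10)]
approx = (h / 2) * (0.0**2 + 1.0**2) + h * sum(x * x for x in interior)
assert abs(approx - 1 / 3) < 2e-3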
| 174 | 1 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
snake_case = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
snake_case = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(_A , exist_ok=_A )
snake_case = os.path.join(_A , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
snake_case = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
snake_case = dict(zip(_A , range(len(_A ) ) ) )
snake_case = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
snake_case = {"unk_token": "<unk>"}
snake_case = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(_A , exist_ok=_A )
snake_case = os.path.join(_A , BART_VOCAB_FILES_NAMES["vocab_file"] )
snake_case = os.path.join(_A , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_A ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_A ) )
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
    def get_bart_tokenizer(self) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
    def tearDown(self):
shutil.rmtree(self.tmpdirname )
    def get_dummy_dataset(self):
snake_case = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever(self):
snake_case = self.get_dummy_dataset()
snake_case = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
snake_case = dataset
snake_case = RagRetriever(
_A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
snake_case = self.get_dummy_dataset()
snake_case = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
snake_case = os.path.join(self.tmpdirname , "dataset" )
snake_case = os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
snake_case = RagRetriever(
_A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
snake_case = RagRetriever(
_A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _A ) , )
return retriever
    def get_dummy_legacy_index_retriever(self):
snake_case = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
snake_case = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
snake_case = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
snake_case = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(_A , open(_A , "wb" ) )
snake_case = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
snake_case = RagRetriever(
_A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCAmelCase(self : Dict ) -> Dict:
snake_case = 1
snake_case = self.get_dummy_canonical_hf_index_retriever()
snake_case = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
snake_case , snake_case , snake_case = retriever.retrieve(_A , n_docs=_A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , _A )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase(self : Optional[int] ) -> List[str]:
snake_case = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
snake_case = self.get_dummy_dataset()
retriever.save_pretrained(_A )
snake_case = RagRetriever.from_pretrained(_A )
self.assertIsInstance(_A , _A )
snake_case = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
snake_case = retriever.retrieve(_A , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase(self : List[Any] ) -> int:
snake_case = 1
snake_case = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
snake_case = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
snake_case , snake_case , snake_case = retriever.retrieve(_A , n_docs=_A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , _A )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase(self : Union[str, Any] ) -> str:
snake_case = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_A )
snake_case = RagRetriever.from_pretrained(_A )
self.assertIsInstance(_A , _A )
snake_case = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
snake_case = retriever.retrieve(_A , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase(self : List[Any] ) -> List[str]:
snake_case = 1
snake_case = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
snake_case = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
snake_case , snake_case , snake_case = retriever.retrieve(_A , n_docs=_A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , _A )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase(self : Optional[int] ) -> Optional[int]:
snake_case = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_A )
snake_case = RagRetriever.from_pretrained(_A )
self.assertIsInstance(_A , _A )
snake_case = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
snake_case = retriever.retrieve(_A , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase(self : Any ) -> Any:
snake_case = 1
snake_case = self.get_dummy_legacy_index_retriever()
snake_case = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
snake_case , snake_case , snake_case = retriever.retrieve(_A , n_docs=_A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , _A )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase(self : str ) -> Dict:
snake_case = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_A )
snake_case = RagRetriever.from_pretrained(_A )
self.assertIsInstance(_A , _A )
snake_case = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
snake_case = retriever.retrieve(_A , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase(self : Dict ) -> Dict:
import torch
snake_case = 1
snake_case = self.get_dummy_canonical_hf_index_retriever()
snake_case = [[5, 7], [1_0, 1_1]]
snake_case = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
snake_case = retriever(_A , _A , prefix=retriever.config.generator.prefix , n_docs=_A )
snake_case , snake_case , snake_case = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_A , _A )
self.assertIsInstance(_A , _A )
self.assertIsInstance(_A , np.ndarray )
snake_case = retriever(
_A , _A , prefix=retriever.config.generator.prefix , n_docs=_A , return_tensors="pt" , )
snake_case , snake_case , snake_case , snake_case = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_A , torch.Tensor )
self.assertIsInstance(_A , torch.Tensor )
self.assertIsInstance(_A , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase(self : Any ) -> Any:
snake_case = self.get_dpr_ctx_encoder_tokenizer()
snake_case = 1
snake_case = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
retriever.set_ctx_encoder_tokenizer(_A )
snake_case = [[5, 7], [1_0, 1_1]]
snake_case = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
snake_case = retriever(_A , _A , prefix=retriever.config.generator.prefix , n_docs=_A )
self.assertEqual(
len(_A ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , _A ) # check for doc token related keys in dictionary.
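# Hedged sketch of the retrieval the dummy index above exercises: exact
# maximum-inner-product search with faiss (assumes faiss-cpu is installed;
# the two rows mirror the "foo"/"bar" embeddings of the dummy dataset).
import numpy as np
import faiss

d = 8
index = faiss.IndexFlatIP(d)
index.add(np.stack([np.ones(d), 2 * np.ones(d)]).astype(np.float32))
scores, ids = index.search(np.ones((1, d), dtype=np.float32), 1)
assert ids[0, 0] == 1  # the doc with the larger norm wins on inner product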
| 137 |
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # rebuild the backend normalizer so it matches the requested options
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
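# Worked example of the segment layout built above for a sentence pair (A, B):
#   tokens   : [CLS] a1 a2 [SEP] b1 [SEP]
#   type ids :   0   0  0    0   1    1
# i.e. len([CLS] + A + [SEP]) zeros followed by len(B + [SEP]) ones.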
| 137 | 1 |
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
A_ : Tuple = logging.get_logger(__name__)
A_ : Tuple = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
A_ : Optional[Any] = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
A_ : str = {
"""facebook/blenderbot_small-90M""": 512,
}
class lowerCamelCase (UpperCAmelCase__ ):
lowerCamelCase__ : str = VOCAB_FILES_NAMES
lowerCamelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : str = BlenderbotSmallTokenizer
def __init__( self : List[str] , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : Optional[Any]="<|endoftext|>" , __UpperCAmelCase : Union[str, Any]="<|endoftext|>" , __UpperCAmelCase : Any="<|endoftext|>" , __UpperCAmelCase : Union[str, Any]=False , __UpperCAmelCase : int=True , **__UpperCAmelCase : Dict , ) -> Tuple:
super().__init__(
ByteLevelBPETokenizer(
vocab=UpperCAmelCase__ , merges=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , ) , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , **UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE__ = add_prefix_space
def SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any]=None ) -> Dict:
SCREAMING_SNAKE_CASE__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 165 |
def longest_distance(graph):
    """Length (in vertices) of the longest path in a DAG, via Kahn's topological sort."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)  # longest chain ending at each vertex

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
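# Worked trace (sketch): long_dist counts vertices on the longest chain.
# For the sample DAG the chain 0 -> 2 -> 5 -> 6 -> 7 has 5 vertices, so the
# call above prints 5 (vertices 2, 3, 4 reach distance 2, then 5 -> 3,
# 6 -> 4, and finally 7 -> 5).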
| 14 | 0 |
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self):
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node):
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node):
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value):
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert):
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert):
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position, value):
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node):
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.previous = None
        node.next = None

    def is_empty(self):
        return self.head is None


def main():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
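# Quick usage sketch of the list above: insert() appends at the tail,
# insert_at_position() counts from 1, and delete_value() unlinks in O(n).
ll = LinkedList()
for v in (1, 2, 4):
    ll.insert(v)
ll.insert_at_position(3, 3)  # -> 1 2 3 4
ll.delete_value(1)           # -> 2 3 4
assert str(ll) == "2 3 4" and ll.get_head_data() == 2 and ll.get_tail_data() == 4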
| 353 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json",
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(self, vocab_size=32000, hidden_size=768, num_hidden_layers=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4, initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False, tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
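# Hedged usage sketch (assumes a transformers version that ships FNet):
# note there is no num_attention_heads - FNet replaces attention with
# Fourier mixing, which is why the config is slimmer than BERT's.
from transformers import FNetConfig

config = FNetConfig(num_hidden_layers=6)
assert config.hidden_act == "gelu_new" and config.tpu_short_seq_length == 512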
| 127 | 0 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation, accepting numpy arrays or torch tensors."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly colinear: fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
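# Quick numeric check of slerp above (sketch): between orthogonal unit
# vectors the weights are sin((1 - t) * theta) / sin(theta) and
# sin(t * theta) / sin(theta); at t = 0.5 with theta = pi/2 both equal
# sin(pi/4), so the result has equal components.
_v = slerp(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
assert np.allclose(_v, [np.sin(np.pi / 4), np.sin(np.pi / 4)])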
class CLIPGuidedStableDiffusion(DiffusionPipeline):
    """CLIP-guided stable diffusion community pipeline; a CoCa model can caption the input image when no prompt is given."""

    def __init__(self, vae, text_encoder, clip_model, tokenizer, unet, scheduler, feature_extractor, coca_model=None, coca_tokenizer=None, coca_transform=None):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
def lowerCAmelCase__ ( self , UpperCamelCase_ = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase__ :Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.enable_attention_slicing(UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
set_requires_grad(self.vae , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
set_requires_grad(self.vae , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
set_requires_grad(self.unet , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
set_requires_grad(self.unet , UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :int = min(int(num_inference_steps * strength ) , UpperCamelCase_ )
UpperCamelCase__ :Dict = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase__ :int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
if not isinstance(UpperCamelCase_ , torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(UpperCamelCase_ )}''' )
UpperCamelCase__ :List[str] = image.to(device=UpperCamelCase_ , dtype=UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :int = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCamelCase_ )
]
UpperCamelCase__ :str = torch.cat(UpperCamelCase_ , dim=0 )
else:
UpperCamelCase__ :Tuple = self.vae.encode(UpperCamelCase_ ).latent_dist.sample(UpperCamelCase_ )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
UpperCamelCase__ :str = 0.18215 * init_latents
UpperCamelCase__ :Optional[Any] = init_latents.repeat_interleave(UpperCamelCase_ , dim=0 )
UpperCamelCase__ :Any = randn_tensor(init_latents.shape , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
# get latents
UpperCamelCase__ :Tuple = self.scheduler.add_noise(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = init_latents
return latents
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Any = self.coca_transform(UpperCamelCase_ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
UpperCamelCase__ :List[str] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
UpperCamelCase__ :int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = self.feature_extractor.preprocess(UpperCamelCase_ )
UpperCamelCase__ :Any = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
UpperCamelCase__ :str = self.clip_model.get_image_features(UpperCamelCase_ )
UpperCamelCase__ :List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = image_embeddings_clip.repeat_interleave(UpperCamelCase_ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :List[str] = latents.detach().requires_grad_()
UpperCamelCase__ :List[Any] = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
# predict the noise residual
UpperCamelCase__ :List[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
UpperCamelCase__ :Union[str, Any] = self.scheduler.alphas_cumprod[timestep]
UpperCamelCase__ :Optional[int] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCamelCase__ :List[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
UpperCamelCase__ :Any = torch.sqrt(UpperCamelCase_ )
UpperCamelCase__ :Dict = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , UpperCamelCase_ ):
UpperCamelCase__ :Any = self.scheduler.sigmas[index]
UpperCamelCase__ :Union[str, Any] = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
UpperCamelCase__ :Dict = 1 / 0.18215 * sample
UpperCamelCase__ :List[Any] = self.vae.decode(UpperCamelCase_ ).sample
UpperCamelCase__ :Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ :List[Any] = transforms.Resize(self.feature_extractor_size )(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = self.normalize(UpperCamelCase_ ).to(latents.dtype )
UpperCamelCase__ :Union[str, Any] = self.clip_model.get_image_features(UpperCamelCase_ )
UpperCamelCase__ :Any = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCamelCase_ )
UpperCamelCase__ :Any = spherical_dist_loss(UpperCamelCase_ , UpperCamelCase_ ).mean() * clip_guidance_scale
UpperCamelCase__ :str = -torch.autograd.grad(UpperCamelCase_ , UpperCamelCase_ )[0]
if isinstance(self.scheduler , UpperCamelCase_ ):
UpperCamelCase__ :List[Any] = latents.detach() + grads * (sigma**2)
UpperCamelCase__ :Optional[int] = noise_pred_original
else:
UpperCamelCase__ :Tuple = noise_pred_original - torch.sqrt(UpperCamelCase_ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = 512 , UpperCamelCase_ = 512 , UpperCamelCase_ = 0.6 , UpperCamelCase_ = 50 , UpperCamelCase_ = 7.5 , UpperCamelCase_ = 1 , UpperCamelCase_ = 0.0 , UpperCamelCase_ = 100 , UpperCamelCase_ = None , UpperCamelCase_ = "pil" , UpperCamelCase_ = True , UpperCamelCase_ = 0.8 , UpperCamelCase_ = 0.1 , UpperCamelCase_ = 0.1 , ):
'''simple docstring'''
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(UpperCamelCase_ )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(UpperCamelCase_ , torch.Generator ) and batch_size > 1:
UpperCamelCase__ :List[Any] = [generator] + [None] * (batch_size - 1)
UpperCamelCase__ :Union[str, Any] = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
UpperCamelCase__ :Union[str, Any] = [x[0] for x in coca_is_none if x[1]]
UpperCamelCase__ :Union[str, Any] = ''', '''.join(UpperCamelCase_ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCamelCase_ ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
UpperCamelCase__ :List[str] = self.get_image_description(UpperCamelCase_ )
if style_prompt is None:
if len(UpperCamelCase_ ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
UpperCamelCase__ :List[Any] = self.get_image_description(UpperCamelCase_ )
# get prompt text embeddings for content and style
UpperCamelCase__ :Union[str, Any] = self.tokenizer(
UpperCamelCase_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors='''pt''' , )
UpperCamelCase__ :Any = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase__ :Any = self.tokenizer(
UpperCamelCase_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors='''pt''' , )
UpperCamelCase__ :Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase__ :str = slerp(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase__ :List[str] = text_embeddings.repeat_interleave(UpperCamelCase_ , dim=0 )
# set timesteps
UpperCamelCase__ :Dict = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
UpperCamelCase__ :Any = {}
if accepts_offset:
UpperCamelCase__ :Tuple = 1
self.scheduler.set_timesteps(UpperCamelCase_ , **UpperCamelCase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
UpperCamelCase__ , UpperCamelCase__ :int = self.get_timesteps(UpperCamelCase_ , UpperCamelCase_ , self.device )
UpperCamelCase__ :Optional[Any] = timesteps[:1].repeat(UpperCamelCase_ )
# Preprocess image
UpperCamelCase__ :Any = preprocess(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Dict = self.prepare_latents(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , text_embeddings.dtype , self.device , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = preprocess(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = self.prepare_latents(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , text_embeddings.dtype , self.device , UpperCamelCase_ )
UpperCamelCase__ :int = slerp(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if clip_guidance_scale > 0:
UpperCamelCase__ :Optional[Any] = self.get_clip_image_embeddings(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.get_clip_image_embeddings(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Dict = slerp(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase__ :Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase__ :str = content_text_input.input_ids.shape[-1]
UpperCamelCase__ :List[str] = self.tokenizer([''''''] , padding='''max_length''' , max_length=UpperCamelCase_ , return_tensors='''pt''' )
UpperCamelCase__ :Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
UpperCamelCase__ :Optional[Any] = uncond_embeddings.repeat_interleave(UpperCamelCase_ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase__ :Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase__ :Optional[int] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase__ :Dict = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
UpperCamelCase__ :Any = torch.randn(UpperCamelCase_ , generator=UpperCamelCase_ , device='''cpu''' , dtype=UpperCamelCase_ ).to(
self.device )
else:
UpperCamelCase__ :int = torch.randn(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=UpperCamelCase_ )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCamelCase__ :Union[str, Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ :Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase__ :Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ :Any = {}
if accepts_eta:
UpperCamelCase__ :Union[str, Any] = eta
# check if the scheduler accepts generator
UpperCamelCase__ :int = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
UpperCamelCase__ :List[Any] = generator
with self.progress_bar(total=UpperCamelCase_ ):
for i, t in enumerate(UpperCamelCase_ ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase__ :Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase__ :str = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
# predict the noise residual
UpperCamelCase__ :str = self.unet(UpperCamelCase_ , UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase__ , UpperCamelCase__ :List[str] = noise_pred.chunk(2 )
UpperCamelCase__ :Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
UpperCamelCase__ :Dict = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.cond_fn(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ :Dict = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase__ :Any = 1 / 0.18215 * latents
UpperCamelCase__ :Union[str, Any] = self.vae.decode(UpperCamelCase_ ).sample
UpperCamelCase__ :Any = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ :Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ :Tuple = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCamelCase_ , nsfw_content_detected=UpperCamelCase_ ) | 97 |
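The guidance arithmetic in the loop above reduces to a few lines of tensor math. A minimal, self-contained sketch; the tensor shape and the 7.5 scale are illustrative values, not this pipeline's defaults:

import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred stacks [unconditional; text-conditioned] predictions along the
    # batch axis, exactly what the single concatenated forward pass produces
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

batched_pred = torch.randn(2, 4, 64, 64)  # hypothetical UNet output
guided = apply_cfg(batched_pred, guidance_scale=7.5)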
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__snake_case = True
except (ImportError, ModuleNotFoundError):
__snake_case = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def a ( __a ) -> str:
'''simple docstring'''
    __a = re.sub('''<n>''' , '''''' , __a ) # remove pegasus newline char (assign back so the substitution takes effect)
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) ) | 97 | 1 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = ort.SessionOptions()
A_ : Optional[Any] = False
return options
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
A_ : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
A_ : Tuple = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , safety_checker=lowercase , feature_extractor=lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase )
A_ : List[str] = 'A red cat sitting on a park bench'
A_ : Tuple = np.random.RandomState(0 )
A_ : Any = pipe(
prompt=lowercase , image=lowercase , mask_image=lowercase , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowercase , output_type='np' , )
A_ : Optional[int] = output.images
A_ : Any = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
A_ : Optional[int] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
A_ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
A_ : int = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-inpainting' , subfolder='scheduler' , revision='onnx' )
A_ : List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase )
A_ : List[str] = 'A red cat sitting on a park bench'
A_ : Tuple = np.random.RandomState(0 )
A_ : str = pipe(
prompt=lowercase , image=lowercase , mask_image=lowercase , guidance_scale=7.5 , num_inference_steps=2_0 , generator=lowercase , output_type='np' , )
A_ : Tuple = output.images
A_ : Tuple = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
A_ : List[Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
 | 351 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
_UpperCAmelCase = {
"""n_samples""": 64,
"""horizon""": 32,
"""num_inference_steps""": 20,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
if __name__ == "__main__":
_UpperCAmelCase = """hopper-medium-v2"""
_UpperCAmelCase = gym.make(env_name)
_UpperCAmelCase = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
_UpperCAmelCase = env.reset()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 1000
_UpperCAmelCase = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
_UpperCAmelCase = pipeline(obs, planning_horizon=32)
# execute action in environment
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = env.step(denorm_actions)
_UpperCAmelCase = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
F""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
_UpperCAmelCase = next_observation
except KeyboardInterrupt:
pass
print(F"""Total reward: {total_reward}""")
| 192 | 0 |
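Stripped of the value-guided pipeline, the rollout above is the standard control loop of the pre-0.26 gym API (the same four-tuple env.step used in the script). A minimal sketch with a random policy and an illustrative environment:

import gym

env = gym.make("CartPole-v1")  # stand-in env; the script uses a d4rl hopper task
obs = env.reset()
total_reward, rollout = 0.0, [obs.copy()]
for t in range(100):
    action = env.action_space.sample()          # stand-in for pipeline(obs, ...)
    obs, reward, done, info = env.step(action)  # old-style 4-tuple API
    total_reward += reward
    rollout.append(obs.copy())
    if done:
        break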
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
lowerCamelCase_ = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
lowerCamelCase_ = {
'ctrl': 256,
}
lowerCamelCase_ = {
'Pregnancy': 16_8629,
'Christianity': 7675,
'Explain': 10_6423,
'Fitness': 6_3440,
'Saving': 6_3163,
'Ask': 2_7171,
'Ass': 9_5985,
'Joke': 16_3509,
'Questions': 4_5622,
'Thoughts': 4_9605,
'Retail': 5_2342,
'Feminism': 16_4338,
'Writing': 1_1992,
'Atheism': 19_2263,
'Netflix': 4_8616,
'Computing': 3_9639,
'Opinion': 4_3213,
'Alone': 4_4967,
'Funny': 5_8917,
'Gaming': 4_0358,
'Human': 4088,
'India': 1331,
'Joker': 7_7138,
'Diet': 3_6206,
'Legal': 1_1859,
'Norman': 4939,
'Tip': 7_2689,
'Weight': 5_2343,
'Movies': 4_6273,
'Running': 2_3425,
'Science': 2090,
'Horror': 3_7793,
'Confession': 6_0572,
'Finance': 1_2250,
'Politics': 1_6360,
'Scary': 19_1985,
'Support': 1_2654,
'Technologies': 3_2516,
'Teenage': 6_6160,
'Event': 3_2769,
'Learned': 6_7460,
'Notion': 18_2770,
'Wikipedia': 3_7583,
'Books': 6665,
'Extract': 7_6050,
'Confessions': 10_2701,
'Conspiracy': 7_5932,
'Links': 6_3674,
'Narcissus': 15_0425,
'Relationship': 5_4766,
'Relationships': 13_4796,
'Reviews': 4_1671,
'News': 4256,
'Translation': 2_6820,
'multilingual': 12_8406,
}
def snake_case ( A__ ):
UpperCAmelCase_ : Tuple = set()
UpperCAmelCase_ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase_ : List[Any] = char
UpperCAmelCase_ : Dict = set(__snake_case )
return pairs
class UpperCamelCase_ (a_ ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = CONTROL_CODES
def __init__( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict="<unk>" , **lowerCAmelCase_ : List[str] ) -> List[Any]:
super().__init__(unk_token=_lowerCamelCase , **_lowerCamelCase )
with open(_lowerCamelCase , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase_ : Optional[int] = json.load(_lowerCamelCase )
UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(_lowerCamelCase , encoding="utf-8" ) as merges_handle:
UpperCAmelCase_ : List[str] = merges_handle.read().split("\n" )[1:-1]
UpperCAmelCase_ : Union[str, Any] = [tuple(merge.split() ) for merge in merges]
UpperCAmelCase_ : Union[str, Any] = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
UpperCAmelCase_ : int = {}
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
return len(self.encoder )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : List[Any] ) -> Any:
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ : Optional[int] = tuple(_lowerCamelCase )
UpperCAmelCase_ : Union[str, Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
UpperCAmelCase_ : Any = get_pairs(_lowerCamelCase )
if not pairs:
return token
while True:
UpperCAmelCase_ : Optional[Any] = min(_lowerCamelCase , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(_lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = bigram
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Union[str, Any] = 0
while i < len(_lowerCamelCase ):
try:
UpperCAmelCase_ : Any = word.index(_lowerCamelCase , _lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_ : str = j
if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_ : Optional[Any] = tuple(_lowerCamelCase )
UpperCAmelCase_ : List[str] = new_word
if len(_lowerCamelCase ) == 1:
break
else:
UpperCAmelCase_ : str = get_pairs(_lowerCamelCase )
UpperCAmelCase_ : str = "@@ ".join(_lowerCamelCase )
UpperCAmelCase_ : Dict = word[:-4]
UpperCAmelCase_ : List[Any] = word
return word
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[Any] ) -> Optional[int]:
UpperCAmelCase_ : int = []
UpperCAmelCase_ : List[Any] = re.findall(R"\S+\n?" , _lowerCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(_lowerCamelCase ).split(" " ) ) )
return split_tokens
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : int ) -> Optional[int]:
return self.encoder.get(_lowerCamelCase , self.encoder.get(self.unk_token ) )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Union[str, Any] ) -> Optional[Any]:
return self.decoder.get(_lowerCamelCase , self.unk_token )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : int = " ".join(_lowerCamelCase ).replace("@@ " , "" ).strip()
return out_string
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int = None ) -> Any:
if not os.path.isdir(_lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ : List[str] = os.path.join(
_lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ : int = os.path.join(
_lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCamelCase , ensure_ascii=_lowerCamelCase ) + "\n" )
UpperCAmelCase_ : Optional[int] = 0
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
UpperCAmelCase_ : Any = token_index
writer.write(" ".join(_lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 268 |
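The pair-extraction step that drives the BPE merge loop above can be shown standalone; the function and sample below are hypothetical stand-ins, not part of the tokenizer class:

def symbol_pairs(word):
    # adjacent-symbol pairs of a word given as a tuple of symbols
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

print(symbol_pairs(("l", "o", "w", "</w>")))
# {('l', 'o'), ('o', 'w'), ('w', '</w>')} (set order may vary)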
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class a ( a_ ):
def __init__( self , _lowerCamelCase , _lowerCamelCase ):
super().__init__()
self.register_modules(unet=_lowerCamelCase , scheduler=_lowerCamelCase )
@torch.no_grad()
def __call__( self , _lowerCamelCase = 1 , _lowerCamelCase = None , _lowerCamelCase = 5_0 , _lowerCamelCase = "pil" , _lowerCamelCase = True , **_lowerCamelCase , ):
lowercase = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=_lowerCamelCase , )
lowercase = image.to(self.device )
# set step values
self.scheduler.set_timesteps(_lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase = self.unet(_lowerCamelCase , _lowerCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowercase = self.scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample
lowercase = (image / 2 + 0.5).clamp(0 , 1 )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase = self.numpy_to_pil(_lowerCamelCase )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=_lowerCamelCase ), "This is a local test"
| 220 | 0 |
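The sampling loop above follows the generic predict-then-step pattern. A model-free sketch using diffusers' DDPMScheduler, where a lambda stands in for the UNet, so the result is noise but the control flow matches:

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 32, 32)
fake_unet = lambda x, t: torch.randn_like(x)  # stand-in for self.unet(x, t).sample
for t in scheduler.timesteps:
    noise_pred = fake_unet(sample, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample
image = (sample / 2 + 0.5).clamp(0, 1)  # same post-processing as the pipeline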
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {}
__UpperCAmelCase = {}
__UpperCAmelCase = {}
def __UpperCamelCase ( lowercase__ : type , lowercase__ : Optional[str] , lowercase__ : Optional[List[str]] = None , ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Dict = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
lowerCAmelCase_ : str = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
lowerCAmelCase_ : List[Any] = format_type
def __UpperCamelCase ( lowercase__ : Exception , lowercase__ : Optional[str] , lowercase__ : Optional[List[str]] = None ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Any = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
lowerCAmelCase_ : Dict = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
__UpperCAmelCase = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
__UpperCAmelCase = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
__UpperCAmelCase = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def __UpperCamelCase ( lowercase__ : Optional[str] ) -> Optional[str]:
'''simple docstring'''
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def __UpperCamelCase ( lowercase__ : Optional[str] , **lowercase__ : List[Any] ) -> Formatter:
'''simple docstring'''
lowerCAmelCase_ : int = get_format_type_from_alias(lowercase__ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**lowercase__ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
            f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got \'{format_type}\'' )
| 28 |
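The registration scheme above is an alias-aware registry over a pair of dicts. A toy reduction (all names are illustrative, not the datasets API):

_classes, _alias_map = {}, {}

def register(cls, name, aliases=()):
    _classes[name] = cls
    for alias in set(aliases) | {name}:
        _alias_map[alias] = name

def make(name, **kwargs):
    return _classes[_alias_map[name]](**kwargs)  # resolve alias, then instantiate

class ToyFormatter:
    def __init__(self, **kwargs):
        self.kwargs = kwargs

register(ToyFormatter, "toy", aliases=["t"])
assert isinstance(make("t"), ToyFormatter)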
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase ( lowercase__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = """huggingface/label-files"""
lowerCAmelCase_ : int = """imagenet-1k-id2label.json"""
lowerCAmelCase_ : List[str] = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase_ : Tuple = {int(lowercase__ ): v for k, v in idalabel.items()}
lowerCAmelCase_ : Optional[int] = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ : Optional[Any] = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
lowerCAmelCase_ : Tuple = BitConfig(
conv_layer=lowercase__ , num_labels=1000 , idalabel=lowercase__ , labelaid=lowercase__ , )
return config
def __UpperCamelCase ( lowercase__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
if "stem.conv" in name:
lowerCAmelCase_ : str = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowerCAmelCase_ : Tuple = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
lowerCAmelCase_ : Dict = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
lowerCAmelCase_ : List[str] = """bit.""" + name
if "bit" not in name and "classifier" not in name:
lowerCAmelCase_ : Any = """bit.encoder.""" + name
return name
def __UpperCamelCase ( ) -> str:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase_ : List[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( lowercase__ : List[str] , lowercase__ : Any , lowercase__ : Any=False ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = get_config(lowercase__ )
# load original model from timm
lowerCAmelCase_ : str = create_model(lowercase__ , pretrained=lowercase__ )
timm_model.eval()
# load state_dict of original model
lowerCAmelCase_ : Any = timm_model.state_dict()
for key in state_dict.copy().keys():
lowerCAmelCase_ : List[str] = state_dict.pop(lowercase__ )
lowerCAmelCase_ : Dict = val.squeeze() if """head""" in key else val
# load HuggingFace model
lowerCAmelCase_ : Tuple = BitForImageClassification(lowercase__ )
model.eval()
model.load_state_dict(lowercase__ )
# create image processor
lowerCAmelCase_ : Tuple = create_transform(**resolve_data_config({} , model=lowercase__ ) )
lowerCAmelCase_ : Union[str, Any] = transform.transforms
lowerCAmelCase_ : str = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
lowerCAmelCase_ : List[str] = BitImageProcessor(
do_resize=lowercase__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowercase__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=lowercase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCAmelCase_ : int = prepare_img()
lowerCAmelCase_ : Tuple = transform(lowercase__ ).unsqueeze(0 )
lowerCAmelCase_ : List[str] = processor(lowercase__ , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(lowercase__ , lowercase__ )
# verify logits
with torch.no_grad():
lowerCAmelCase_ : Tuple = model(lowercase__ )
lowerCAmelCase_ : List[str] = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
lowerCAmelCase_ : Optional[Any] = timm_model(lowercase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase__ , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase__ )
processor.save_pretrained(lowercase__ )
if push_to_hub:
print(f'Pushing model {model_name} and processor to the hub' )
model.push_to_hub(f'ybelkada/{model_name}' )
processor.push_to_hub(f'ybelkada/{model_name}' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
__UpperCAmelCase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 28 | 1 |
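The key-renaming pass in the conversion script boils down to ordered substring substitution over state-dict keys; a toy version covering the plain renames (the real function additionally prefixes norm and encoder keys):

renames = [
    ("stem.conv", "bit.embedder.convolution"),
    ("blocks", "layers"),
    ("head.fc", "classifier.1"),
]

def rename_key(key):
    for old, new in renames:
        key = key.replace(old, new)
    return key

toy_state_dict = {"stem.conv.weight": 0, "blocks.0.conv1.weight": 1, "head.fc.bias": 2}
print({rename_key(k): v for k, v in toy_state_dict.items()})
# {'bit.embedder.convolution.weight': 0, 'layers.0.conv1.weight': 1, 'classifier.1.bias': 2}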
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__A : int = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
__A : List[Any] = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
__A : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __A :
lowerCAmelCase_ : Optional[str] = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
lowerCAmelCase_ : Optional[str] = field(
default=lowerCAmelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCAmelCase_ : Optional[str] = field(
default=lowerCAmelCase , metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."} , )
lowerCAmelCase_ : Optional[str] = field(default=lowerCAmelCase , metadata={"help": "A folder containing the training data."} )
lowerCAmelCase_ : Optional[str] = field(default=lowerCAmelCase , metadata={"help": "A folder containing the validation data."} )
lowerCAmelCase_ : Optional[float] = field(
default=0.1_5 , metadata={"help": "Percent to split off of train for validation."} )
lowerCAmelCase_ : int = field(default=32 , metadata={"help": "The size of the square patches to use for masking."} )
lowerCAmelCase_ : float = field(
default=0.6 , metadata={"help": "Percentage of patches to mask."} , )
lowerCAmelCase_ : Optional[int] = field(
default=lowerCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCAmelCase_ : Optional[int] = field(
default=lowerCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : List[Any] = {}
if self.train_dir is not None:
lowerCAmelCase : List[Any] = self.train_dir
if self.validation_dir is not None:
lowerCAmelCase : Union[str, Any] = self.validation_dir
lowerCAmelCase : Optional[Any] = data_files if data_files else None
@dataclass
class __A :
lowerCAmelCase_ : str = field(
default=lowerCAmelCase , metadata={
"help": (
"The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
"checkpoint identifier on the hub. "
"Don't set if you want to train a model from scratch."
)
} , )
lowerCAmelCase_ : Optional[str] = field(
default=lowerCAmelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase )} , )
lowerCAmelCase_ : Optional[str] = field(
default=lowerCAmelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase_ : Optional[str] = field(
default=lowerCAmelCase , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
lowerCAmelCase_ : Optional[str] = field(
default=lowerCAmelCase , metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"} , )
lowerCAmelCase_ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCAmelCase_ : str = field(default=lowerCAmelCase , metadata={"help": "Name or path of preprocessor config."} )
lowerCAmelCase_ : bool = field(
default=lowerCAmelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowerCAmelCase_ : Optional[int] = field(
default=lowerCAmelCase , metadata={
"help": (
"The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
)
} , )
lowerCAmelCase_ : Optional[int] = field(
default=lowerCAmelCase , metadata={
"help": (
"The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
)
} , )
lowerCAmelCase_ : Optional[int] = field(
default=lowerCAmelCase , metadata={"help": "Stride to use for the encoder."} , )
class __A :
def __init__( self : Tuple , UpperCAmelCase_ : Tuple=192 , UpperCAmelCase_ : int=32 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=0.6 ):
lowerCAmelCase : int = input_size
lowerCAmelCase : Any = mask_patch_size
lowerCAmelCase : Dict = model_patch_size
lowerCAmelCase : Tuple = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('Input size must be divisible by mask patch size' )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('Mask patch size must be divisible by model patch size' )
lowerCAmelCase : Union[str, Any] = self.input_size // self.mask_patch_size
lowerCAmelCase : List[str] = self.mask_patch_size // self.model_patch_size
lowerCAmelCase : List[str] = self.rand_size**2
lowerCAmelCase : Any = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self : int ):
lowerCAmelCase : Optional[Any] = np.random.permutation(self.token_count )[: self.mask_count]
lowerCAmelCase : Tuple = np.zeros(self.token_count , dtype=UpperCAmelCase_ )
lowerCAmelCase : int = 1
lowerCAmelCase : str = mask.reshape((self.rand_size, self.rand_size) )
lowerCAmelCase : List[Any] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowerCAmelCase : Tuple = torch.stack([example['pixel_values'] for example in examples] )
lowerCAmelCase : Optional[int] = torch.stack([example['mask'] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
'''simple docstring'''
lowerCAmelCase : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mim', _UpperCAmelCase, _UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase : List[str] = training_args.get_process_log_level()
logger.setLevel(_UpperCAmelCase )
transformers.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCAmelCase : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
lowerCAmelCase : Optional[int] = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# If we don't have a validation split, split off a percentage of train as validation.
lowerCAmelCase : Union[str, Any] = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split, _UpperCAmelCase ) and data_args.train_val_split > 0.0:
lowerCAmelCase : Dict = ds['train'].train_test_split(data_args.train_val_split )
lowerCAmelCase : Dict = split['train']
lowerCAmelCase : Union[str, Any] = split['test']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase : List[Any] = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(model_args.config_name_or_path, **_UpperCAmelCase )
elif model_args.model_name_or_path:
lowerCAmelCase : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path, **_UpperCAmelCase )
else:
lowerCAmelCase : Any = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(f"New config: {config}" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(_UpperCAmelCase, 'decoder_type' ):
lowerCAmelCase : int = 'simmim'
# adapt config
lowerCAmelCase : Tuple = model_args.image_size if model_args.image_size is not None else config.image_size
lowerCAmelCase : str = model_args.patch_size if model_args.patch_size is not None else config.patch_size
lowerCAmelCase : str = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'image_size': model_args.image_size,
'patch_size': model_args.patch_size,
'encoder_stride': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
lowerCAmelCase : Tuple = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **_UpperCAmelCase )
elif model_args.model_name_or_path:
lowerCAmelCase : int = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **_UpperCAmelCase )
else:
lowerCAmelCase : Dict = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
lowerCAmelCase : Tuple = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
lowerCAmelCase : Any = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path ), config=_UpperCAmelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
else:
logger.info('Training new model from scratch' )
lowerCAmelCase : str = AutoModelForMaskedImageModeling.from_config(_UpperCAmelCase )
if training_args.do_train:
lowerCAmelCase : Optional[Any] = ds['train'].column_names
else:
lowerCAmelCase : str = ds['validation'].column_names
if data_args.image_column_name is not None:
lowerCAmelCase : Tuple = data_args.image_column_name
elif "image" in column_names:
lowerCAmelCase : Tuple = 'image'
elif "img" in column_names:
lowerCAmelCase : Union[str, Any] = 'img'
else:
lowerCAmelCase : List[str] = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
lowerCAmelCase : Any = Compose(
[
Lambda(lambda _UpperCAmelCase : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size, scale=(0.6_7, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean, std=image_processor.image_std ),
] )
# create mask generator
lowerCAmelCase : Optional[Any] = MaskGenerator(
input_size=model_args.image_size, mask_patch_size=data_args.mask_patch_size, model_patch_size=model_args.patch_size, mask_ratio=data_args.mask_ratio, )
def preprocess_images(_UpperCAmelCase ):
lowerCAmelCase : str = [transforms(_UpperCAmelCase ) for image in examples[image_column_name]]
lowerCAmelCase : Union[str, Any] = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
lowerCAmelCase : Tuple = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_UpperCAmelCase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
lowerCAmelCase : List[str] = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_UpperCAmelCase )
# Initialize our trainer
lowerCAmelCase : Optional[Any] = Trainer(
model=_UpperCAmelCase, args=_UpperCAmelCase, train_dataset=ds['train'] if training_args.do_train else None, eval_dataset=ds['validation'] if training_args.do_eval else None, tokenizer=_UpperCAmelCase, data_collator=_UpperCAmelCase, )
# Training
if training_args.do_train:
lowerCAmelCase : Any = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase : int = last_checkpoint
lowerCAmelCase : Tuple = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model()
trainer.log_metrics('train', train_result.metrics )
trainer.save_metrics('train', train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCAmelCase : str = trainer.evaluate()
trainer.log_metrics('eval', _UpperCAmelCase )
trainer.save_metrics('eval', _UpperCAmelCase )
# Write model card and (optionally) push to hub
lowerCAmelCase : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'masked-image-modeling',
'dataset': data_args.dataset_name,
'tags': ['masked-image-modeling'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCAmelCase )
else:
trainer.create_model_card(**_UpperCAmelCase )
if __name__ == "__main__":
main()
| 138 |
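Assuming the masked assignment names in the MaskGenerator class are restored to their originals, it would be exercised like this (argument values mirror the class defaults):

gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
mask = gen()
print(mask.shape)       # torch.Size([2304]): flat mask over (192/4)**2 model patches
print(int(mask.sum()))  # 1408 = 22 masked 32px patches * (32/4)**2 model patches each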
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
__A : str = '''pytorch_model.bin'''
__A : Optional[int] = '''pytorch_model.bin.index.json'''
__A : Any = '''adapter_config.json'''
__A : int = '''adapter_model.bin'''
__A : Union[str, Any] = '''adapter_model.safetensors'''
__A : int = '''tf_model.h5'''
__A : Dict = '''tf_model.h5.index.json'''
__A : Dict = '''model.ckpt'''
__A : Optional[int] = '''flax_model.msgpack'''
__A : Tuple = '''flax_model.msgpack.index.json'''
__A : Any = '''model.safetensors'''
__A : Dict = '''model.safetensors.index.json'''
__A : Dict = '''config.json'''
__A : int = '''preprocessor_config.json'''
__A : Optional[Any] = FEATURE_EXTRACTOR_NAME
__A : Any = '''generation_config.json'''
__A : str = '''modelcard.json'''
__A : str = '''▁'''
__A : Union[str, Any] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
__A : List[str] = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
__A : List[Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
__A : Tuple = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if version.parse(_UpperCAmelCase ) < version.parse(_UpperCAmelCase ):
if "dev" in min_version:
lowerCAmelCase : Tuple = (
'This example requires a source install from HuggingFace Transformers (see '
'`https://huggingface.co/docs/transformers/installation#install-from-source`),'
)
else:
lowerCAmelCase : Any = f"This example requires a minimum version of {min_version},"
error_message += f" but the version found is {__version__}.\n"
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' )
| 138 | 1 |
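The version guard above leans on packaging's parse, which orders dev releases before their final release; a quick standalone check:

from packaging import version

assert version.parse("4.30.0") < version.parse("4.31.0")
assert version.parse("4.31.0.dev0") < version.parse("4.31.0")  # dev sorts first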
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
snake_case_ = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
snake_case_ = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
snake_case_ = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self :Optional[Any] ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
def UpperCAmelCase__ ( self :Dict , lowercase_ :Union[str, Any] , lowercase_ :Optional[int] , lowercase_ :int=False ) -> Any:
UpperCAmelCase = spearmanr(lowercase_ , lowercase_ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 181 |
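The metric class wraps a single scipy call; exercised directly with the example values from its own docstring:

from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 1))     # -0.7, matching the first docstring example
print(round(pvalue, 2))  # 0.19, matching the second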
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self :Optional[Any] , lowercase_ :CLIPSegForImageSegmentation , lowercase_ :CLIPSegProcessor , lowercase_ :AutoencoderKL , lowercase_ :CLIPTextModel , lowercase_ :CLIPTokenizer , lowercase_ :UNetaDConditionModel , lowercase_ :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowercase_ :StableDiffusionSafetyChecker , lowercase_ :CLIPImageProcessor , ) -> List[str]:
super().__init__()
if hasattr(scheduler.config , 'steps_offset' ) and scheduler.config.steps_offset != 1:
UpperCAmelCase = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
                'to update the config accordingly as leaving `steps_offset` might lead to incorrect results'
' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
' file'
)
deprecate('steps_offset!=1' , '1.0.0' , lowercase_ , standard_warn=lowercase_ )
UpperCAmelCase = dict(scheduler.config )
UpperCAmelCase = 1
UpperCAmelCase = FrozenDict(lowercase_ )
if hasattr(scheduler.config , 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False:
UpperCAmelCase = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
' Hub, it would be very nice if you could open a Pull request for the'
' `scheduler/scheduler_config.json` file'
)
deprecate('skip_prk_steps not set' , '1.0.0' , lowercase_ , standard_warn=lowercase_ )
UpperCAmelCase = dict(scheduler.config )
UpperCAmelCase = True
UpperCAmelCase = FrozenDict(lowercase_ )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
segmentation_model=lowercase_ , segmentation_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , feature_extractor=lowercase_ , )
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[Union[str, int]] = "auto" ) -> int:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_ )
def UpperCAmelCase__ ( self :Optional[int] ) -> List[str]:
self.enable_attention_slicing(lowercase_ )
def UpperCAmelCase__ ( self :int ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase = torch.device('cuda' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase__ ( self :Optional[Any] ) -> List[str]:
if self.device != torch.device('meta' ) or not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self :Optional[Any] , lowercase_ :Union[str, List[str]] , lowercase_ :Union[torch.FloatTensor, PIL.Image.Image] , lowercase_ :str , lowercase_ :int = 5_12 , lowercase_ :int = 5_12 , lowercase_ :int = 50 , lowercase_ :float = 7.5 , lowercase_ :Optional[Union[str, List[str]]] = None , lowercase_ :Optional[int] = 1 , lowercase_ :float = 0.0 , lowercase_ :Optional[torch.Generator] = None , lowercase_ :Optional[torch.FloatTensor] = None , lowercase_ :Optional[str] = "pil" , lowercase_ :bool = True , lowercase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ :int = 1 , **lowercase_ :int , ) -> int:
UpperCAmelCase = self.segmentation_processor(
text=[text] , images=[image] , padding='max_length' , return_tensors='pt' ).to(self.device )
UpperCAmelCase = self.segmentation_model(**lowercase_ )
UpperCAmelCase = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
UpperCAmelCase = self.numpy_to_pil(lowercase_ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
UpperCAmelCase = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , )
| 181 | 1 |
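The mask-building step inside __call__ above condenses to sigmoid-then-PIL; a shape-only sketch with random logits standing in for the CLIPSeg output:

import numpy as np
import torch
from PIL import Image

logits = torch.randn(352, 352)             # hypothetical segmentation logits
soft_mask = torch.sigmoid(logits).numpy()  # values squashed into (0, 1)
mask_image = Image.fromarray((soft_mask * 255).astype(np.uint8))  # grayscale mask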
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _a ( lowerCamelCase: int ) -> int:
'''simple docstring'''
__A = filter(lambda lowerCamelCase : p.requires_grad , model.parameters() )
__A = sum([np.prod(p.size() ) for p in model_parameters] )
return params
snake_case__ : List[Any] = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric) -> ModelCheckpoint:
    """Save the model with the best validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding"
            " to this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience) -> EarlyStopping:
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
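# Minimal wiring sketch for the pieces above (assumption: `args` exposes `output_dir`,
# `val_metric` and `early_stopping_patience`; those names are not defined in this file):
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(args.output_dir, args.val_metric),
#           get_early_stopping_callback(args.val_metric, args.early_stopping_patience),
#       ]
#   )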
| 117 |
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman-numeral string to its integer value."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Generate the minimal Roman-numeral representation of an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
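# Quick round-trip sanity check (illustrative addition, not part of the original solution):
assert parse_roman_numerals("XLIX") == 49
assert generate_roman_numerals(49) == "XLIX"
# "XXXXVIIII" is a valid but wasteful spelling of 49; the minimal form saves 5 characters.
assert len("XXXXVIIII") - len(generate_roman_numerals(parse_roman_numerals("XXXXVIIII"))) == 5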
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Total characters saved by rewriting each numeral in the file in minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as file_handle:
        lines = file_handle.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings
if __name__ == "__main__":
print(f'{solution() = }')
| 117 | 1 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peaking (bell) biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
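# Example of applying one of these filters sample-by-sample. The `IIRFilter.process`
# method is assumed from `audio_filters.iir_filter` in the same repository:
if __name__ == "__main__":
    lowpass = make_lowpass(frequency=1_000, samplerate=48_000)
    impulse = [1.0] + [0.0] * 15
    response = [lowpass.process(sample) for sample in impulse]
    print(response)  # the biquad's decaying impulse response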
| 353 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 152 | 0 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)
class __lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self : str , *_lowerCAmelCase : Any , **_lowerCAmelCase : List[str] ) -> None:
"""simple docstring"""
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
| 159 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
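# Illustrative property of the hash above: comments and blank lines do not affect it.
assert _hash_python_lines(["x = 1  # set x", "", "y = 2"]) == _hash_python_lines(["x = 1  ", "y = 2"])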
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
| 159 | 1 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
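# Usage sketch for the adapter above (shapes are arbitrary examples):
#
#   base = nn.Linear(768, 768)
#   wrapped = LoRALayer(base, rank=16)
#   # adapter[1] starts at zero, so wrapped(x) == base(x) until the adapter is trained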
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
UpperCamelCase_ : Tuple ='''bigscience/bloom-1b7'''
# Constant values
UpperCamelCase_ : Optional[int] =2.109659552692574
UpperCamelCase_ : Optional[int] ='''Hello my name is'''
UpperCamelCase_ : Optional[Any] =set()
EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
UpperCamelCase_ : str =10
def _A (self ):
# Models and tokenizer
__lowercase= AutoTokenizer.from_pretrained(self.model_name )
class A ( A_ ):
def _A (self ):
super().setUp()
# Models and tokenizer
__lowercase= AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__lowercase= AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase , device_map='auto' )
def _A (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
__lowercase= self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase , 'quantization_config' ) )
__lowercase= config.to_dict()
__lowercase= config.to_diff_dict()
__lowercase= config.to_json_string()
def _A (self ):
from bitsandbytes.nn import Paramsabit
__lowercase= self.model_fpaa.get_memory_footprint()
__lowercase= self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowercase= get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _A (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _A (self ):
__lowercase= self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase= self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase ) , self.EXPECTED_OUTPUTS )
def _A (self ):
__lowercase= BitsAndBytesConfig()
__lowercase= True
__lowercase= AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase , device_map='auto' )
__lowercase= self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase= model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase ) , self.EXPECTED_OUTPUTS )
def _A (self ):
with self.assertRaises(lowerCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase )
def _A (self ):
__lowercase= BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase ):
__lowercase= AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase , load_in_abit=lowerCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , )
def _A (self ):
with self.assertRaises(lowerCAmelCase ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(lowerCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(lowerCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowercase= self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase= self.model_fpaa.to(torch.floataa )
__lowercase= self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
__lowercase= self.model_fpaa.to('cpu' )
# Check this does not throw an error
__lowercase= self.model_fpaa.half()
# Check this does not throw an error
__lowercase= self.model_fpaa.float()
def _A (self ):
__lowercase= AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=lowerCAmelCase , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
@classmethod
def _A (cls ):
__lowercase= 't5-small'
__lowercase= 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__lowercase= AutoTokenizer.from_pretrained(cls.model_name )
__lowercase= 'Translate in German: Hello, my dog is cute'
def _A (self ):
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
from transformers import TaForConditionalGeneration
__lowercase= TaForConditionalGeneration._keep_in_fpaa_modules
__lowercase= None
# test with `t5-small`
__lowercase= TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase , device_map='auto' )
__lowercase= self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase= model.generate(**lowerCAmelCase )
# test with `flan-t5-small`
__lowercase= TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase , device_map='auto' )
__lowercase= self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase= model.generate(**lowerCAmelCase )
__lowercase= modules
def _A (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowercase= TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowercase= self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase= model.generate(**lowerCAmelCase )
# test with `flan-t5-small`
__lowercase= TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase , device_map='auto' )
__lowercase= self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase= model.generate(**lowerCAmelCase )
class A ( A_ ):
def _A (self ):
super().setUp()
# model_name
__lowercase= 'bigscience/bloom-560m'
__lowercase= 't5-small'
# Different types of model
__lowercase= AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase , device_map='auto' )
# Sequence classification model
__lowercase= AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase , device_map='auto' )
# CausalLM model
__lowercase= AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase , device_map='auto' )
# Seq2seq model
__lowercase= AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase , device_map='auto' )
def _A (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A ( A_ ):
def _A (self ):
super().setUp()
def _A (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
__lowercase= pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowercase= self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A ( A_ ):
def _A (self ):
super().setUp()
def _A (self ):
__lowercase= AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__lowercase= self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__lowercase= model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase ) , self.EXPECTED_OUTPUTS )
class A ( A_ ):
def _A (self ):
__lowercase= 'facebook/opt-350m'
super().setUp()
def _A (self ):
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__lowercase= AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__lowercase= False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowercase= param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase ) ):
__lowercase= LoRALayer(module.q_proj , rank=1_6 )
__lowercase= LoRALayer(module.k_proj , rank=1_6 )
__lowercase= LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
__lowercase= self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowercase= model.forward(**lowerCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase , lowerCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A ( A_ ):
UpperCamelCase_ : Optional[Any] ='''gpt2-xl'''
UpperCamelCase_ : List[Any] =3.3191854854152187
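# For reference, the quantized load path these tests exercise (illustrative; the
# checkpoint name is just an example):
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#   quant_config = BitsAndBytesConfig(load_in_4bit=True)
#   model = AutoModelForCausalLM.from_pretrained(
#       "bigscience/bloom-1b7", quantization_config=quant_config, device_map="auto"
#   )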
| 362 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 304 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
a_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
a_ = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) )
lowerCAmelCase__ = self.transformer_dir
shutil.copy(
os.path.join(_UpperCAmelCase , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , )
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None )-> str:
'''simple docstring'''
lowerCAmelCase__ = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
lowerCAmelCase__ = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
lowerCAmelCase__ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCAmelCase__ = black.format_str(_UpperCAmelCase , mode=_UpperCAmelCase )
lowerCAmelCase__ = os.path.join(self.transformer_dir , "new_code.py" )
with open(_UpperCAmelCase , "w" , newline="\n" ) as f:
f.write(_UpperCAmelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_UpperCAmelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_UpperCAmelCase )
with open(_UpperCAmelCase , "r" ) as f:
self.assertTrue(f.read() , _UpperCAmelCase )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , _UpperCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , _UpperCAmelCase ) , )
# Copy consistency with a really long name
lowerCAmelCase__ = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , F"{long_class_name}LMPredictionHead" , re.sub("Bert" , _UpperCAmelCase , _UpperCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , _UpperCAmelCase , overwrite_result=re.sub("Bert" , "TestModel" , _UpperCAmelCase ) , )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
lowerCAmelCase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
lowerCAmelCase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
lowerCAmelCase__ = check_copies.convert_to_localized_md(
_UpperCAmelCase , _UpperCAmelCase , localized_readme["format_model_list"] )
self.assertFalse(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase__ = check_copies.convert_to_localized_md(
_UpperCAmelCase , _UpperCAmelCase , localized_readme["format_model_list"] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(_UpperCAmelCase )
lowerCAmelCase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
lowerCAmelCase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase__ = check_copies.convert_to_localized_md(
_UpperCAmelCase , _UpperCAmelCase , localized_readme["format_model_list"] )
# Check if the model link is synchronized.
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
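# The convention these tests verify looks like this in the modeling files (illustrative):
#
#   # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel
#   class TestModelLMPredictionHead(nn.Module):
#       ...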
| 340 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq/fairseq OPT state dict and rename its keys to the HF layout."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV into separate Q, K, V tensors
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight separated in K,V,Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
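# Shape-level illustration of the K,V,Q split performed in `load_checkpoint` (synthetic tensor):
#
#   qkv = torch.zeros(6, 4)                    # fused projection, depth = 6
#   k, v, q = torch.split(qkv, 6 // 3, dim=0)  # three (2, 4) chunks, in K,V,Q order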
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
A = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 160 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 370 |
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """Run accelerate's `pre_forward` hook (if one is attached) before calling `method`."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
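# Usage sketch (the class and method below are hypothetical):
#
#   class Decoder:
#       @apply_forward_hook
#       def decode(self, z):
#           ...
#
# With accelerate >= 0.17.0 installed, the offload hook's `pre_forward` runs before `decode`.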
| 161 | 0 |
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Sum the numbers below `limit` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
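# Spot check (illustrative): 585 reads the same in base 10 and base 2 (0b1001001001).
assert is_palindrome(585) and is_palindrome(bin(585)[2:])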
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 18 | import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a__ ( A__ ):
def __init__( self : Tuple,_A : Optional[int],_A : Any=13,_A : List[str]=7,_A : int=True,_A : Dict=True,_A : Dict=False,_A : List[Any]=True,_A : Any=99,_A : Optional[int]=32,_A : Any=5,_A : List[Any]=4,_A : Dict=64,_A : Optional[Any]="gelu",_A : Tuple=0.1,_A : Any=0.1,_A : List[Any]=512,_A : Dict=16,_A : Optional[Any]=2,_A : Union[str, Any]=0.02,_A : List[str]=3,_A : Optional[Any]=4,_A : Union[str, Any]=None,_A : Tuple=2,_A : List[str]=2,_A : str=2,_A : Dict=2,_A : Optional[Any]=4,_A : Union[str, Any]=1,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : Dict = seq_length
SCREAMING_SNAKE_CASE_ : Dict = is_training
SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE_ : int = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : Tuple = vocab_size
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : str = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = num_choices
SCREAMING_SNAKE_CASE_ : Dict = scope
SCREAMING_SNAKE_CASE_ : int = q_groups
SCREAMING_SNAKE_CASE_ : Tuple = k_groups
SCREAMING_SNAKE_CASE_ : List[Any] = v_groups
SCREAMING_SNAKE_CASE_ : Tuple = post_attention_groups
SCREAMING_SNAKE_CASE_ : int = intermediate_groups
SCREAMING_SNAKE_CASE_ : List[Any] = output_groups
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size,vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,attention_probs_dropout_prob=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,q_groups=self.q_groups,k_groups=self.k_groups,v_groups=self.v_groups,post_attention_groups=self.post_attention_groups,intermediate_groups=self.intermediate_groups,output_groups=self.output_groups,)
def __UpperCamelCase ( self : Tuple,_A : Union[str, Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertModel(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Any,_A : Tuple,_A : str,_A : Any,_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertForMaskedLM(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : Any,_A : Tuple,_A : int,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
_A,attention_mask=_A,start_positions=_A,end_positions=_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : List[Any],_A : List[str],_A : Tuple,_A : List[Any],_A : List[str],_A : List[str],_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = SqueezeBertForSequenceClassification(_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : str,_A : Optional[int],_A : str,_A : List[Any],_A : List[str],_A : str,_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = SqueezeBertForTokenClassification(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : Tuple,_A : str,_A : Optional[Any],_A : int,_A : str,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : str = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
_A,attention_mask=_A,labels=_A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Dict = config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
A = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A = False
A = True
A = False
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self,config_class=_A,dim=37 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_A )
@slow
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = SqueezeBertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_sentencepiece
@require_tokenizers
@require_torch
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((1, 3) )
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(_A,_A,atol=1E-4 ) )
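# Eager-mode equivalent of the integration check above (illustrative; label order
# follows the checkpoint's config):
#
#   tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
#   inputs = tokenizer("A soccer game.", "Men are playing a sport.", return_tensors="pt")
#   logits = model(**inputs).logits  # shape (1, 3)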
| 18 | 1 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
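# The in-place update above is the standard low-rank product; in isolation (synthetic shapes):
#
#   up = torch.randn(320, 4)           # lora_up weight
#   down = torch.randn(4, 320)         # lora_down weight
#   delta = 0.75 * torch.mm(up, down)  # alpha * (up @ down), added onto the base weight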
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
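    # Editorial illustration (not part of the original script): per layer, convert() applies
    # W <- W0 + alpha * up @ down. A minimal sketch of that rule on a hypothetical 4x8
    # linear weight (all names below are invented for the demo):
    #
    #     up, down = torch.randn(4, 2), torch.randn(2, 8)
    #     w0 = torch.zeros(4, 8)
    #     merged = w0 + 0.75 * torch.mm(up, down)
    #     assert merged.shape == (4, 8)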
| 366 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
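    # Editorial demo (not in the original file): run one safety check on the module-level
    # test tables above; passing describe=True also prints the pretty tables first.
    BankersAlgorithm(
        claim_vector=test_claim_vector,
        allocated_resources_table=test_allocated_res_table,
        maximum_claim_table=test_maximum_claim_table,
    ).main(describe=True)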
| 0 | 0 |
from __future__ import annotations
from collections.abc import Iterator


class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    # Class and method names reconstructed from the call sites below; the original
    # identifiers were lost in extraction.
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
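    # Editorial demo (not in the original): sum a three-node tree through the iterator
    # protocol defined above; prints 6.
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    print(next(iter(BinaryTreeNodeSum(root))))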
| 101 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
    'BridgeTower/bridgetower-base-itm-mlm': (
        'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
    ),
}


class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")
        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
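# Editorial usage sketch (commented out; not part of the module). Composing a full config
# from the two sub-configs via the classmethod above, with default values throughout:
#
#     text_cfg = BridgeTowerTextConfig()
#     vision_cfg = BridgeTowerVisionConfig()
#     cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
#     assert cfg.to_dict()["model_type"] == "bridgetower"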
| 180 | 0 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _snake_case :
def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , _a=2 , ):
__magic_name__ : Dict = parent
__magic_name__ : int = batch_size
__magic_name__ : int = image_size
__magic_name__ : Optional[Any] = patch_size
__magic_name__ : Optional[int] = num_channels
__magic_name__ : List[Any] = is_training
__magic_name__ : Dict = use_labels
__magic_name__ : Optional[Any] = hidden_size
__magic_name__ : List[str] = num_hidden_layers
__magic_name__ : List[Any] = num_attention_heads
__magic_name__ : Optional[Any] = intermediate_size
__magic_name__ : Any = hidden_act
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : Tuple = attention_probs_dropout_prob
__magic_name__ : List[Any] = type_sequence_label_size
__magic_name__ : Optional[Any] = initializer_range
__magic_name__ : List[str] = scope
__magic_name__ : int = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__magic_name__ : Union[str, Any] = (image_size // patch_size) ** 2
__magic_name__ : Any = num_patches + 1
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Optional[Any] = None
if self.use_labels:
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
__magic_name__ : Union[str, Any] = ViTModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__magic_name__ : Optional[Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
__magic_name__ : Tuple = ViTForMaskedImageModeling(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__magic_name__ : Any = model(_lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__magic_name__ : List[Any] = 1
__magic_name__ : int = ViTForMaskedImageModeling(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__magic_name__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : List[str] = model(_lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
__magic_name__ : Optional[int] = self.type_sequence_label_size
__magic_name__ : Optional[Any] = ViTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__magic_name__ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__magic_name__ : str = 1
__magic_name__ : Optional[Any] = ViTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__magic_name__ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : List[Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _snake_case ( a__ , a__ , unittest.TestCase ):
UpperCamelCase__ = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
{'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = ViTModelTester(self )
__magic_name__ : str = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self ):
pass
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Any = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] = model_class(_lowerCamelCase )
__magic_name__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Optional[int] = [*signature.parameters.keys()]
__magic_name__ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Optional[int] = ViTModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(_lowerCamelCase )
__magic_name__ : int = self.default_image_processor
__magic_name__ : Optional[Any] = prepare_img()
__magic_name__ : Optional[Any] = image_processor(images=_lowerCamelCase , return_tensors="pt" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
__magic_name__ : List[Any] = model(**_lowerCamelCase )
# verify the logits
__magic_name__ : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
__magic_name__ : Any = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : str = ViTModel.from_pretrained("facebook/dino-vits8" ).to(_lowerCamelCase )
__magic_name__ : List[Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
__magic_name__ : Optional[Any] = prepare_img()
__magic_name__ : List[Any] = image_processor(images=_lowerCamelCase , return_tensors="pt" )
__magic_name__ : Optional[int] = inputs.pixel_values.to(_lowerCamelCase )
# forward pass
with torch.no_grad():
__magic_name__ : Union[str, Any] = model(_lowerCamelCase , interpolate_pos_encoding=_lowerCamelCase )
# verify the logits
__magic_name__ : str = torch.Size((1, 3_601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , _lowerCamelCase )
__magic_name__ : List[Any] = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
__magic_name__ : Dict = self.default_image_processor
__magic_name__ : str = prepare_img()
__magic_name__ : Any = image_processor(images=_lowerCamelCase , return_tensors="pt" )
__magic_name__ : int = inputs.pixel_values.to(_lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__magic_name__ : List[str] = model(_lowerCamelCase )
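        # Editorial note (not part of the test): the patch arithmetic used throughout this
        # tester is seq_length = (image_size // patch_size) ** 2 + 1; with image_size=30
        # and patch_size=2 that is 15 ** 2 + 1 = 226 tokens (patches plus the [CLS] token).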
| 354 |
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case: brute force for 3 or fewer points
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
| 41 | 0 |
'''simple docstring'''
import torch
from torch import nn
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
def A ( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : str=False ):
"""simple docstring"""
if labels is not None:
# Shift so that tokens < n predict n
UpperCamelCase = hidden[..., :-1, :].contiguous()
UpperCamelCase = labels[..., 1:].contiguous()
UpperCamelCase = hidden.view(-1 , hidden.size(-1 ) )
UpperCamelCase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
UpperCamelCase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
UpperCamelCase = self._compute_logit(UpperCamelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
UpperCamelCase = labels != -1_0_0
UpperCamelCase = torch.zeros_like(UpperCamelCase__ , dtype=hidden.dtype , device=hidden.device )
UpperCamelCase = (
-nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
UpperCamelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
else:
# construct weights and biases
UpperCamelCase , UpperCamelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCamelCase , UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase = self.out_layers[0].weight[l_idx:r_idx]
UpperCamelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCamelCase = self.out_layers[i].weight
UpperCamelCase = self.out_layers[i].bias
if i == 0:
UpperCamelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCamelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCamelCase__ )
biases.append(UpperCamelCase__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase = weights[0], biases[0], self.out_projs[0]
UpperCamelCase = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
if labels is None:
UpperCamelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
UpperCamelCase = torch.zeros_like(UpperCamelCase__ , dtype=hidden.dtype , device=hidden.device )
UpperCamelCase = 0
UpperCamelCase = [0] + self.cutoffs
for i in range(len(UpperCamelCase__ ) - 1 ):
UpperCamelCase , UpperCamelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
UpperCamelCase = (labels >= l_idx) & (labels < r_idx)
UpperCamelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
UpperCamelCase = labels.index_select(0 , UpperCamelCase__ ) - l_idx
UpperCamelCase = head_logprob.index_select(0 , UpperCamelCase__ )
UpperCamelCase = hidden.index_select(0 , UpperCamelCase__ )
else:
UpperCamelCase = hidden
if i == 0:
if labels is not None:
UpperCamelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
UpperCamelCase = head_logprob[:, : self.cutoffs[0]]
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase = weights[i], biases[i], self.out_projs[i]
UpperCamelCase = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
UpperCamelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
UpperCamelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
UpperCamelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
UpperCamelCase = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , UpperCamelCase__ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def A ( self : List[Any] , UpperCamelCase__ : str ):
"""simple docstring"""
if self.n_clusters == 0:
UpperCamelCase = self._compute_logit(UpperCamelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
else:
# construct weights and biases
UpperCamelCase , UpperCamelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCamelCase , UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase = self.out_layers[0].weight[l_idx:r_idx]
UpperCamelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCamelCase = self.out_layers[i].weight
UpperCamelCase = self.out_layers[i].bias
if i == 0:
UpperCamelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCamelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCamelCase__ )
biases.append(UpperCamelCase__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase = weights[0], biases[0], self.out_projs[0]
UpperCamelCase = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
UpperCamelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
UpperCamelCase = [0] + self.cutoffs
for i in range(len(UpperCamelCase__ ) - 1 ):
UpperCamelCase , UpperCamelCase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
UpperCamelCase = head_logprob[:, : self.cutoffs[0]]
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase = weights[i], biases[i], self.out_projs[i]
UpperCamelCase = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
UpperCamelCase = head_logprob[:, -i] + tail_logprob_i
UpperCamelCase = logprob_i
return out
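# Editorial sketch (not part of the module): how cutoffs partition the vocabulary in the
# adaptive softmax above, with illustrative numbers only. With cutoffs=[2000, 10000] and
# n_token=50000, the head scores ids [0, 2000) plus 2 cluster logits, while the two tail
# clusters cover [2000, 10000) and [10000, 50000), optionally at reduced width via div_val.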
| 28 |
'''simple docstring'''
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
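    # Editorial demo (not in the original): solve for whichever quantity is passed as 0.
    print(ohms_law(voltage=10, current=0, resistance=5))  # -> {'current': 2.0}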
| 80 | 0 |
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef

DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
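# Editorial demo (commented out so the module stays import-safe; not part of the original):
#
#     import numpy as np
#     preds = np.array([1, 0, 1, 1])
#     labels = np.array([1, 0, 0, 1])
#     print(glue_compute_metrics("mrpc", preds, labels))  # accuracy, F1, and their mean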
| 285 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
lowercase : Tuple = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    # Class name reconstructed from the upstream (deprecated) M-CTC-T feature extractor;
    # the original identifier was lost in extraction.
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
if self.win_function == "hamming_window":
lowercase : Optional[Any] = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=snake_case )
else:
lowercase : Optional[Any] = window_function(window_length=self.sample_size ,name=self.win_function )
lowercase : int = mel_filter_bank(
num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,)
lowercase : Dict = spectrogram(
one_waveform * self.frame_signal_scale ,window=snake_case ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=snake_case ,preemphasis=self.preemphasis_coeff ,mel_filters=snake_case ,mel_floor=self.mel_floor ,log_mel="""log""" ,)
return msfc_features.T
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
if self.normalize_means:
lowercase : List[Any] = x[:input_length].mean(axis=0 )
lowercase : Dict = np.subtract(snake_case ,snake_case )
if self.normalize_vars:
lowercase : List[Any] = x[:input_length].std(axis=0 )
lowercase : List[Any] = np.divide(snake_case ,snake_case )
if input_length < x.shape[0]:
lowercase : Any = padding_value
# make sure array is in float32
lowercase : Tuple = x.astype(np.floataa )
return x
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ):
'''simple docstring'''
lowercase : Optional[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(snake_case ,snake_case ,self.padding_value ) for x, n in zip(snake_case ,snake_case )]
def __call__( self ,snake_case ,snake_case = False ,snake_case = None ,snake_case = False ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,**snake_case ,):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowercase : List[Any] = isinstance(snake_case ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
lowercase : str = is_batched_numpy or (
isinstance(snake_case ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
lowercase : List[str] = [np.asarray(snake_case ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case ,np.ndarray ):
lowercase : int = np.asarray(snake_case ,dtype=np.floataa )
elif isinstance(snake_case ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase : Dict = [raw_speech]
# extract fbank features
lowercase : Tuple = [self._extract_mfsc_features(snake_case ) for one_waveform in raw_speech]
# convert into correct format for padding
lowercase : Union[str, Any] = BatchFeature({"""input_features""": features} )
lowercase : Optional[int] = self.pad(
snake_case ,padding=snake_case ,max_length=snake_case ,truncation=snake_case ,pad_to_multiple_of=snake_case ,return_attention_mask=snake_case ,**snake_case ,)
# make sure list is in array format
lowercase : Tuple = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] ,snake_case ):
lowercase : List[Any] = [np.asarray(snake_case ,dtype=np.floataa ) for feature in input_features]
lowercase : int = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
lowercase : Any = [np.asarray(snake_case ,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
lowercase : List[str] = (
np.array(snake_case ,dtype=np.intaa )
if self._get_padding_strategies(snake_case ,max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
lowercase : List[str] = self.normalize(
padded_inputs["""input_features"""] ,attention_mask=snake_case )
if return_tensors is not None:
lowercase : str = padded_inputs.convert_to_tensors(snake_case )
return padded_inputs
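# Editorial usage sketch (commented out; not part of the module). Assuming the class name
# reconstructed above, features for one second of silence at 16 kHz:
#
#     import numpy as np
#     fe = MCTCTFeatureExtractor()
#     out = fe(np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="np")
#     print(out["input_features"].shape)  # (1, num_frames, 80)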
| 285 | 1 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def a__ ( snake_case__ = "" ) -> int:
lowerCamelCase = url or """https://www.imdb.com/chart/top/?ref_=nv_mv_250"""
lowerCamelCase = BeautifulSoup(requests.get(_lowerCAmelCase ).text , """html.parser""" )
lowerCamelCase = soup.find_all("""td""" , attrs="""titleColumn""" )
lowerCamelCase = soup.find_all("""td""" , class_="""ratingColumn imdbRating""" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(_lowerCAmelCase , _lowerCAmelCase )
}
def a__ ( snake_case__ = "IMDb_Top_250_Movies.csv" ) -> Any:
lowerCamelCase = get_imdb_top_aaa_movies()
with open(_lowerCAmelCase , """w""" , newline="""""" ) as out_file:
lowerCamelCase = csv.writer(_lowerCAmelCase )
writer.writerow(["""Movie title""", """IMDb rating"""] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
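    # Editorial note (not in the original): the scrape depends on IMDb's markup staying
    # stable; a quick sanity check could be
    #     print(len(get_imdb_top_250_movies()))  # expect 250 while the chart layout holds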
| 291 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class _lowercase :
'''simple docstring'''
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=50 , snake_case__=0.02 , snake_case__=True , snake_case__=None , ):
'''simple docstring'''
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = initializer_range
UpperCamelCase_ = use_labels
UpperCamelCase_ = scope
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowerCamelCase ( self ):
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ , ):
'''simple docstring'''
UpperCamelCase_ = BertGenerationEncoder(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCamelCase_ = model(snake_case__ , attention_mask=snake_case__ )
UpperCamelCase_ = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ , ):
'''simple docstring'''
UpperCamelCase_ = True
UpperCamelCase_ = BertGenerationEncoder(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCamelCase_ = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
UpperCamelCase_ = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ , ):
'''simple docstring'''
UpperCamelCase_ = True
UpperCamelCase_ = True
UpperCamelCase_ = BertGenerationDecoder(config=snake_case__ ).to(snake_case__ ).eval()
# first forward pass
UpperCamelCase_ = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
UpperCamelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase_ = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )["hidden_states"][0]
UpperCamelCase_ = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )["hidden_states"][0]
# select random slice
UpperCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ , ):
'''simple docstring'''
UpperCamelCase_ = BertGenerationDecoder(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCamelCase_ = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _lowercase (a_ , a_ , a_ , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowercase__ = (BertGenerationDecoder,) if is_torch_available() else ()
lowercase__ = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = BertGenerationEncoderTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def _lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*snake_case__ )
    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*snake_case__ )
@slow
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
self.assertIsNotNone(snake_case__ )
@require_torch
class _lowercase (unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
UpperCamelCase_ = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
UpperCamelCase_ = model(snake_case__ )[0]
UpperCamelCase_ = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , snake_case__ )
UpperCamelCase_ = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
@require_torch
class _lowercase (unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
UpperCamelCase_ = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
UpperCamelCase_ = model(snake_case__ )[0]
UpperCamelCase_ = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape , snake_case__ )
UpperCamelCase_ = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
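# Editorial note (not part of the test file): both integration tests above load the same
# checkpoint, once as an encoder (hidden states of width 1024) and once as a decoder with
# an LM head (logits over a 50358-token vocabulary), hence the different expected shapes.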
| 128 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
    '''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_convbert_fast'''] = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_convbert'''] = [
        '''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ConvBertForMaskedLM''',
        '''ConvBertForMultipleChoice''',
        '''ConvBertForQuestionAnswering''',
        '''ConvBertForSequenceClassification''',
        '''ConvBertForTokenClassification''',
        '''ConvBertLayer''',
        '''ConvBertModel''',
        '''ConvBertPreTrainedModel''',
        '''load_tf_weights_in_convbert''',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_convbert'''] = [
        '''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFConvBertForMaskedLM''',
        '''TFConvBertForMultipleChoice''',
        '''TFConvBertForQuestionAnswering''',
        '''TFConvBertForSequenceClassification''',
        '''TFConvBertForTokenClassification''',
        '''TFConvBertLayer''',
        '''TFConvBertModel''',
        '''TFConvBertPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
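# Editorial note (not part of the module): once the lazy module is installed in sys.modules,
# names such as ConvBertConfig are only materialized on first attribute access, e.g.
#
#     from transformers.models.convbert import ConvBertConfig  # triggers the real import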
| 217 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __lowercase(BaseImageProcessor):
    # Base class and attribute name restored from the imports above; the original class
    # identifier was lost in extraction and is left as-is.
    model_input_names = ["pixel_values"]
def __init__(self ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = PILImageResampling.BILINEAR ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = True ,_lowerCamelCase = 1 / 255 ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> None:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
__lowercase = size if size is not None else {'''shortest_edge''': 256}
__lowercase = get_size_dict(_lowerCamelCase ,default_to_square=_lowerCamelCase )
__lowercase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__lowercase = get_size_dict(_lowerCamelCase ,param_name='''crop_size''' )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_center_crop
__lowercase = crop_size
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = PILImageResampling.BICUBIC ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> np.ndarray:
'''simple docstring'''
__lowercase = get_size_dict(_lowerCamelCase ,default_to_square=_lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__lowercase = get_resize_output_image_size(_lowerCamelCase ,size=size['''shortest_edge'''] ,default_to_square=_lowerCamelCase )
return resize(_lowerCamelCase ,size=_lowerCamelCase ,resample=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> np.ndarray:
'''simple docstring'''
__lowercase = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(_lowerCamelCase ,size=(size['''height'''], size['''width''']) ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
return rescale(_lowerCamelCase ,scale=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> np.ndarray:
'''simple docstring'''
return normalize(_lowerCamelCase ,mean=_lowerCamelCase ,std=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = ChannelDimension.FIRST ,**_lowerCamelCase ,) -> Any:
'''simple docstring'''
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(_lowerCamelCase ,default_to_square=_lowerCamelCase )
__lowercase = resample if resample is not None else self.resample
__lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase = crop_size if crop_size is not None else self.crop_size
__lowercase = get_size_dict(_lowerCamelCase ,param_name='''crop_size''' )
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
__lowercase = [self.resize(image=_lowerCamelCase ,size=_lowerCamelCase ,resample=_lowerCamelCase ) for image in images]
if do_center_crop:
__lowercase = [self.center_crop(image=_lowerCamelCase ,size=_lowerCamelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=_lowerCamelCase ,scale=_lowerCamelCase ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=_lowerCamelCase ,mean=_lowerCamelCase ,std=_lowerCamelCase ) for image in images]
__lowercase = [to_channel_dimension_format(_lowerCamelCase ,_lowerCamelCase ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=_lowerCamelCase ,tensor_type=_lowerCamelCase )
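The resize step above scales the shorter image side to size["shortest_edge"] while preserving the aspect ratio. A standalone sketch of that size computation (a helper written for illustration, not the transformers get_resize_output_image_size):

def shortest_edge_output_size(height: int, width: int, shortest_edge: int) -> tuple:
    # Scale so the shorter side equals `shortest_edge`; round the longer side.
    short, long = (height, width) if height <= width else (width, height)
    scale = shortest_edge / short
    new_long = int(round(long * scale))
    return (shortest_edge, new_long) if height <= width else (new_long, shortest_edge)


assert shortest_edge_output_size(480, 640, 256) == (256, 341)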
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> str:
'''simple docstring'''
__lowercase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_lowerCamelCase ):
__lowercase = target_sizes.numpy()
__lowercase = []
for idx in range(len(_lowerCamelCase ) ):
__lowercase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='''bilinear''' ,align_corners=_lowerCamelCase )
__lowercase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowerCamelCase )
else:
__lowercase = logits.argmax(dim=1 )
__lowercase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
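The method above upsamples each logit map to its target size and takes a per-pixel argmax to produce class-id maps. The same two steps in isolation (random logits as stand-ins, shapes chosen for illustration):

import torch

logits = torch.randn(2, 21, 32, 32)  # (batch, num_labels, h, w)
target_sizes = [(128, 160), (96, 96)]
semantic_maps = []
for idx, size in enumerate(target_sizes):
    upsampled = torch.nn.functional.interpolate(
        logits[idx].unsqueeze(0), size=size, mode="bilinear", align_corners=False
    )
    semantic_maps.append(upsampled[0].argmax(dim=0))  # (h, w) map of class ids
assert semantic_maps[0].shape == (128, 160)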
| 217 | 1 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( __snake_case ):
_UpperCAmelCase :List[Any] = (DDIMParallelScheduler,)
_UpperCAmelCase :Any = (('eta', 0.0), ('num_inference_steps', 5_0))
def __UpperCamelCase( self , **A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**A_ )
return config
def __UpperCamelCase( self , **A_ ):
'''simple docstring'''
UpperCamelCase : Tuple = self.scheduler_classes[0]
UpperCamelCase : List[Any] = self.get_scheduler_config(**A_ )
UpperCamelCase : Dict = scheduler_class(**A_ )
UpperCamelCase , UpperCamelCase : Tuple = 10, 0.0
UpperCamelCase : List[Any] = self.dummy_model()
UpperCamelCase : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(A_ )
for t in scheduler.timesteps:
UpperCamelCase : Any = model(A_ , A_ )
UpperCamelCase : Any = scheduler.step(A_ , A_ , A_ , A_ ).prev_sample
return sample
def __UpperCamelCase( self ):
'''simple docstring'''
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=A_ )
UpperCamelCase : Optional[Any] = self.scheduler_classes[0]
UpperCamelCase : Dict = self.get_scheduler_config(steps_offset=1 )
UpperCamelCase : List[str] = scheduler_class(**A_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
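The expected tensor in the assert above follows from the "leading" timestep spacing: with 1000 training timesteps, 5 inference steps, and steps_offset=1, the step ratio is 200 and the offset shifts every timestep by one. A quick sketch of that arithmetic:

# Assuming num_train_timesteps=1000, num_inference_steps=5, steps_offset=1.
step_ratio = 1000 // 5
timesteps = [i * step_ratio + 1 for i in range(4, -1, -1)]
assert timesteps == [801, 601, 401, 201, 1]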
def __UpperCamelCase( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=A_ , beta_end=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
self.check_over_configs(thresholding=A_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , )
def __UpperCamelCase( self ):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=A_ , num_inference_steps=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=A_ , eta=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = self.scheduler_classes[0]
UpperCamelCase : Union[str, Any] = self.get_scheduler_config()
UpperCamelCase : List[Any] = scheduler_class(**A_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.scheduler_classes[0]
UpperCamelCase : Any = self.get_scheduler_config()
UpperCamelCase : List[Any] = scheduler_class(**A_ )
UpperCamelCase , UpperCamelCase : Tuple = 10, 0.0
scheduler.set_timesteps(A_ )
UpperCamelCase : Tuple = self.dummy_model()
UpperCamelCase : List[str] = self.dummy_sample_deter
UpperCamelCase : Optional[int] = self.dummy_sample_deter + 0.1
UpperCamelCase : Optional[int] = self.dummy_sample_deter - 0.1
UpperCamelCase : Optional[Any] = samplea.shape[0]
UpperCamelCase : Dict = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCamelCase : int = torch.arange(A_ )[0:3, None].repeat(1 , A_ )
UpperCamelCase : Tuple = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCamelCase : Optional[int] = scheduler.batch_step_no_noise(A_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , A_ )
UpperCamelCase : Optional[Any] = torch.sum(torch.abs(A_ ) )
UpperCamelCase : Any = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.full_loop()
UpperCamelCase : Dict = torch.sum(torch.abs(A_ ) )
UpperCamelCase : Dict = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.full_loop(prediction_type="v_prediction" )
UpperCamelCase : List[str] = torch.sum(torch.abs(A_ ) )
UpperCamelCase : Optional[int] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 )
UpperCamelCase : Dict = torch.sum(torch.abs(A_ ) )
UpperCamelCase : Union[str, Any] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 )
UpperCamelCase : Union[str, Any] = torch.sum(torch.abs(A_ ) )
UpperCamelCase : Dict = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3
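The full_loop helper above is the standard diffusers sampling loop. A minimal standalone version against the public DDIMScheduler API (a random stand-in replaces the UNet, so the output is meaningless; only the step order matters here):

import torch
from diffusers import DDIMScheduler

scheduler = DDIMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    noise_pred = torch.randn_like(sample)  # a real UNet forward pass would go here
    sample = scheduler.step(noise_pred, t, sample).prev_sample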
| 52 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : "DiagonalGaussianDistribution"
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = True
@register_to_config
def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]:
super().__init__()
# pass init params to Encoder
UpperCAmelCase_ : List[str] = Encoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , )
# pass init params to Decoder
UpperCAmelCase_ : Dict = Decoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , )
UpperCAmelCase_ : Any = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
UpperCAmelCase_ : List[Any] = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : int = False
# only relevant if vae tiling is enabled
UpperCAmelCase_ : Optional[int] = self.config.sample_size
UpperCAmelCase_ : int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_ : Optional[Any] = 0.25
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]:
if isinstance(_UpperCamelCase , (Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int:
UpperCAmelCase_ : Tuple = use_tiling
def __UpperCAmelCase ( self ) -> Dict:
self.enable_tiling(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = True
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : Optional[int] = {}
def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
UpperCAmelCase_ : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return processors
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
module.set_processor(_UpperCamelCase )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ : Union[str, Any] = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase )
UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ : List[str] = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase )
for y in range(_UpperCamelCase ):
UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase )
for x in range(_UpperCamelCase ):
UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
UpperCAmelCase_ : Any = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ : List[str] = []
for i in range(0 , x.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : Any = []
for j in range(0 , x.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : str = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 )
UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ : Union[str, Any] = []
for i in range(0 , z.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = []
for j in range(0 , z.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : Optional[Any] = sample
UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist
if sample_posterior:
UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase )
else:
UpperCAmelCase_ : int = posterior.mode()
UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
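The blend_v/blend_h helpers above hide tile seams by linearly cross-fading the overlapping rows or columns of adjacent tiles. The vertical ramp in isolation (a standalone sketch of the same formula):

import torch


def blend_v(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
    # Cross-fade `blend_extent` rows between the bottom of `a` and the top of `b`.
    blend_extent = min(a.shape[2], b.shape[2], blend_extent)
    b = b.clone()
    for y in range(blend_extent):
        weight = y / blend_extent  # 0 at the seam start, approaching 1 at the end
        b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - weight) + b[:, :, y, :] * weight
    return b


top, bottom = torch.ones(1, 4, 16, 16), torch.zeros(1, 4, 16, 16)
blended = blend_v(top, bottom, blend_extent=4)
assert float(blended[0, 0, 0, 0]) == 1.0  # first blended row is taken fully from `a`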
| 29 | 0 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
__lowerCAmelCase : Dict =None
try:
import msvcrt
except ImportError:
__lowerCAmelCase : Optional[Any] =None
try:
import fcntl
except ImportError:
__lowerCAmelCase : Any =None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
__lowerCAmelCase : Any =OSError
# Data
# ------------------------------------------------
__lowerCAmelCase : Dict =[
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
__lowerCAmelCase : Tuple ="3.0.12"
__lowerCAmelCase : Tuple =None
def UpperCamelCase ( ):
global _logger
A__ = _logger or logging.getLogger(__name__ )
return _logger
class UpperCAmelCase ( UpperCamelCase__ ):
def __init__( self :str , lowercase_ :Optional[Any] )-> Optional[int]:
A__ = lock_file
return None
def __str__( self :Tuple )-> Dict:
A__ = F"The file lock '{self.lock_file}' could not be acquired."
return A__
class UpperCAmelCase :
def __init__( self :List[Any] , lowercase_ :Optional[Any] )-> Any:
A__ = lock
return None
def __enter__( self :Union[str, Any] )-> Optional[int]:
return self.lock
def __exit__( self :Any , lowercase_ :Union[str, Any] , lowercase_ :str , lowercase_ :int )-> List[str]:
self.lock.release()
return None
class UpperCAmelCase :
def __init__( self :Optional[Any] , lowercase_ :Tuple , lowercase_ :str=-1 , lowercase_ :List[str]=None )-> Tuple:
A__ = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
A__ = self.hash_filename_if_too_long(lowercase_ , lowercase_ )
# The path to the lock file.
A__ = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
A__ = None
# The default timeout value.
A__ = timeout
# We use this lock primarily for the lock counter.
A__ = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
A__ = 0
return None
@property
def UpperCAmelCase_ ( self :Any )-> str:
return self._lock_file
@property
def UpperCAmelCase_ ( self :List[str] )-> Optional[Any]:
return self._timeout
@timeout.setter
def UpperCAmelCase_ ( self :Optional[Any] , lowercase_ :int )-> Tuple:
A__ = float(lowercase_ )
return None
def UpperCAmelCase_ ( self :Dict )-> str:
raise NotImplementedError()
def UpperCAmelCase_ ( self :Dict )-> Tuple:
raise NotImplementedError()
@property
def UpperCAmelCase_ ( self :str )-> List[str]:
return self._lock_file_fd is not None
def UpperCAmelCase_ ( self :str , lowercase_ :int=None , lowercase_ :Any=0.0_5 )-> Dict:
# Use the default timeout, if no timeout is provided.
if timeout is None:
A__ = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
A__ = id(self )
A__ = self._lock_file
A__ = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(lowercase_ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
A__ = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :Tuple=False )-> Union[str, Any]:
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
A__ = id(self )
A__ = self._lock_file
logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
A__ = 0
logger().debug(F"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self :str )-> Optional[Any]:
self.acquire()
return self
def __exit__( self :Optional[Any] , lowercase_ :List[Any] , lowercase_ :List[str] , lowercase_ :Tuple )-> Any:
self.release()
return None
def __del__( self :Optional[Any] )-> List[str]:
self.release(force=lowercase_ )
return None
def UpperCAmelCase_ ( self :List[str] , lowercase_ :str , lowercase_ :int )-> str:
A__ = os.path.basename(lowercase_ )
if len(lowercase_ ) > max_length and max_length > 0:
A__ = os.path.dirname(lowercase_ )
A__ = str(hash(lowercase_ ) )
A__ = filename[: max_length - len(lowercase_ ) - 8] + "..." + hashed_filename + ".lock"
return os.path.join(lowercase_ , lowercase_ )
else:
return path
class UpperCAmelCase ( UpperCamelCase__ ):
def __init__( self :Tuple , lowercase_ :List[Any] , lowercase_ :str=-1 , lowercase_ :int=None )-> Optional[int]:
from .file_utils import relative_to_absolute_path
super().__init__(lowercase_ , timeout=lowercase_ , max_filename_length=lowercase_ )
A__ = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
def UpperCAmelCase_ ( self :int )-> Union[str, Any]:
A__ = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
A__ = os.open(self._lock_file , lowercase_ )
except OSError:
pass
else:
try:
msvcrt.locking(lowercase_ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(lowercase_ )
else:
A__ = fd
return None
def UpperCAmelCase_ ( self :List[Any] )-> Optional[Any]:
A__ = self._lock_file_fd
A__ = None
msvcrt.locking(lowercase_ , msvcrt.LK_UNLCK , 1 )
os.close(lowercase_ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UpperCAmelCase ( UpperCamelCase__ ):
def __init__( self :List[Any] , lowercase_ :str , lowercase_ :Tuple=-1 , lowercase_ :Any=None )-> List[str]:
A__ = os.statvfs(os.path.dirname(lowercase_ ) ).f_namemax
super().__init__(lowercase_ , timeout=lowercase_ , max_filename_length=lowercase_ )
def UpperCAmelCase_ ( self :Dict )-> Dict:
A__ = os.O_RDWR | os.O_CREAT | os.O_TRUNC
A__ = os.open(self._lock_file , lowercase_ )
try:
fcntl.flock(lowercase_ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(lowercase_ )
else:
A__ = fd
return None
def UpperCAmelCase_ ( self :Optional[int] )-> Optional[Any]:
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
A__ = self._lock_file_fd
A__ = None
fcntl.flock(lowercase_ , fcntl.LOCK_UN )
os.close(lowercase_ )
return None
class UpperCAmelCase ( UpperCamelCase__ ):
def UpperCAmelCase_ ( self :Any )-> Optional[Any]:
A__ = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
A__ = os.open(self._lock_file , lowercase_ )
except OSError:
pass
else:
A__ = fd
return None
def UpperCAmelCase_ ( self :Optional[int] )-> Dict:
os.close(self._lock_file_fd )
A__ = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
__lowerCAmelCase : Any =None
if msvcrt:
__lowerCAmelCase : Dict =WindowsFileLock
elif fcntl:
__lowerCAmelCase : str =UnixFileLock
else:
__lowerCAmelCase : int =SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
| 123 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__lowerCAmelCase : Tuple =trt.Logger(trt.Logger.WARNING)
__lowerCAmelCase : Optional[Any] =absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__lowerCAmelCase : List[Any] =logging.getLogger(__name__)
__lowerCAmelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
__lowerCAmelCase : Tuple =parser.parse_args()
if args.tokenizer_name:
__lowerCAmelCase : int =AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
__lowerCAmelCase : Union[str, Any] =args.per_device_eval_batch_size
__lowerCAmelCase : List[Any] =(args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__lowerCAmelCase : Tuple =True
__lowerCAmelCase : int ="temp_engine/bert-fp32.engine"
if args.fpaa:
__lowerCAmelCase : Tuple ="temp_engine/bert-fp16.engine"
if args.inta:
__lowerCAmelCase : Optional[int] ="temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
__lowerCAmelCase : Tuple =1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__lowerCAmelCase : Optional[Any] =[network.get_input(i) for i in range(network.num_inputs)]
__lowerCAmelCase : Any =[_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__lowerCAmelCase : Optional[Any] =1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__lowerCAmelCase : int =builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__lowerCAmelCase : Dict =builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def UpperCamelCase ( _lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[int] , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict ):
A__ = np.asarray(inputs["input_ids"] , dtype=np.intaa )
A__ = np.asarray(inputs["attention_mask"] , dtype=np.intaa )
A__ = np.asarray(inputs["token_type_ids"] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , _lowerCamelCase )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _lowerCamelCase )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _lowerCamelCase )
# start time
A__ = time.time()
# Run inference
context.execute_async(
bindings=[int(_lowerCamelCase ) for d_inp in d_inputs] + [int(_lowerCamelCase ), int(_lowerCamelCase )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
cuda.memcpy_dtoh_async(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
A__ = time.time()
A__ = end_time - start_time
A__ = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
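The helper above follows the usual pycuda pattern: page-locked host buffers, device allocations, asynchronous copies queued on one stream, and a synchronize before the host reads results. Its skeleton, with hypothetical buffer shapes and the TensorRT execute call elided:

import numpy as np
import pycuda.autoinit  # noqa: F401 (creates a CUDA context)
import pycuda.driver as cuda

h_input = cuda.pagelocked_empty((1, 384), dtype=np.int32)    # pinned host memory
h_output = cuda.pagelocked_empty((1, 384), dtype=np.float32)
d_input = cuda.mem_alloc(h_input.nbytes)
d_output = cuda.mem_alloc(h_output.nbytes)
stream = cuda.Stream()

cuda.memcpy_htod_async(d_input, h_input, stream)
# ... context.execute_async(..., stream_handle=stream.handle) would run here ...
cuda.memcpy_dtoh_async(h_output, d_output, stream)
stream.synchronize()  # h_output is only valid after this point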
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__lowerCAmelCase : str =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowerCAmelCase : List[Any] =load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__lowerCAmelCase : Optional[Any] =raw_datasets["validation"].column_names
__lowerCAmelCase : Optional[Any] ="question" if "question" in column_names else column_names[0]
__lowerCAmelCase : str ="context" if "context" in column_names else column_names[1]
__lowerCAmelCase : Optional[Any] ="answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__lowerCAmelCase : Any =tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
__lowerCAmelCase : Any =min(args.max_seq_length, tokenizer.model_max_length)
def UpperCamelCase ( _lowerCamelCase : Optional[int] ):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
A__ = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit with the context of the previous feature.
A__ = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="only_second" if pad_on_right else "only_first" , max_length=_lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , padding="max_length" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
A__ = tokenized_examples.pop("overflow_to_sample_mapping" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
A__ = []
for i in range(len(tokenized_examples["input_ids"] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
A__ = tokenized_examples.sequence_ids(_lowerCamelCase )
A__ = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
A__ = sample_mapping[i]
tokenized_examples["example_id"].append(examples["id"][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
A__ = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i] )
]
return tokenized_examples
__lowerCAmelCase : str =raw_datasets["validation"]
# Validation Feature Creation
__lowerCAmelCase : Union[str, Any] =eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
__lowerCAmelCase : List[Any] =default_data_collator
__lowerCAmelCase : List[Any] =eval_dataset.remove_columns(["example_id", "offset_mapping"])
__lowerCAmelCase : List[str] =DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def UpperCamelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]="eval" ):
# Post-processing: we match the start logits and end logits to answers in the original context.
A__ = postprocess_qa_predictions(
examples=_lowerCamelCase , features=_lowerCamelCase , predictions=_lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_lowerCamelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
A__ = [
{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
]
else:
A__ = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
A__ = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_lowerCamelCase , label_ids=_lowerCamelCase )
__lowerCAmelCase : Tuple =load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def UpperCamelCase ( _lowerCamelCase : Union[str, Any] ):
return trt.volume(engine.get_binding_shape(_lowerCamelCase ) ) * engine.get_binding_dtype(_lowerCamelCase ).itemsize
# Allocate device memory for inputs and outputs.
__lowerCAmelCase : Any =[cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__lowerCAmelCase : List[Any] =cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__lowerCAmelCase : List[str] =cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__lowerCAmelCase : List[str] =cuda.mem_alloc(h_outputa.nbytes)
__lowerCAmelCase : int =cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__lowerCAmelCase : Optional[Any] =cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
__lowerCAmelCase : str =0.0
__lowerCAmelCase : Tuple =0
__lowerCAmelCase : List[str] =timeit.default_timer()
__lowerCAmelCase : Union[str, Any] =None
for step, batch in enumerate(eval_dataloader):
__lowerCAmelCase , __lowerCAmelCase : Dict =model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__lowerCAmelCase , __lowerCAmelCase : List[Any] =outputs
__lowerCAmelCase : Tuple =torch.tensor(start_logits)
__lowerCAmelCase : Tuple =torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__lowerCAmelCase : Tuple =accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__lowerCAmelCase : Union[str, Any] =accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__lowerCAmelCase : int =(accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__lowerCAmelCase : List[Any] =logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__lowerCAmelCase : Dict =nested_truncate(all_preds, len(eval_dataset))
__lowerCAmelCase : Optional[int] =timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
logger.info("Total Number of Inference = %d", niter)
__lowerCAmelCase : Optional[Any] =post_processing_function(eval_examples, eval_dataset, all_preds)
__lowerCAmelCase : Optional[Any] =metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 123 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __snake_case( _lowerCAmelCase ) -> Any:
snake_case__ : Any = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
snake_case__ : List[Any] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
snake_case__ : Optional[int] = 4
snake_case__ : Any = 48
snake_case__ : List[Any] = """pixelshuffle_aux"""
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
snake_case__ : Tuple = [6, 6, 6, 6]
snake_case__ : Dict = 60
snake_case__ : str = [6, 6, 6, 6]
snake_case__ : str = """pixelshuffledirect"""
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
snake_case__ : Dict = 4
snake_case__ : str = """nearest+conv"""
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
snake_case__ : Any = 1
snake_case__ : Dict = 1
snake_case__ : Tuple = 126
snake_case__ : Dict = 7
snake_case__ : Tuple = 255.0
snake_case__ : Tuple = """"""
return config
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
if "patch_embed.proj" in name and "layers" not in name:
snake_case__ : Any = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
snake_case__ : int = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
if "layers" in name:
snake_case__ : List[str] = name.replace("""layers""" , """encoder.stages""" )
if "residual_group.blocks" in name:
snake_case__ : Optional[int] = name.replace("""residual_group.blocks""" , """layers""" )
if "attn.proj" in name:
snake_case__ : Optional[Any] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
snake_case__ : Optional[int] = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
snake_case__ : str = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
snake_case__ : List[str] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
snake_case__ : int = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
snake_case__ : int = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
snake_case__ : Tuple = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
snake_case__ : Dict = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
snake_case__ : str = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
snake_case__ : Optional[int] = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if "patch_embed.proj" in name:
snake_case__ : List[str] = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
if name == "norm.weight":
snake_case__ : str = """layernorm.weight"""
if name == "norm.bias":
snake_case__ : Tuple = """layernorm.bias"""
if "conv_first" in name:
snake_case__ : int = name.replace("""conv_first""" , """first_convolution""" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
snake_case__ : str = name.replace("""conv_last""" , """final_convolution""" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
snake_case__ : List[Any] = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
if "upsample.0" in name:
snake_case__ : List[str] = name.replace("""upsample.0""" , """upsample.convolution_0""" )
if "upsample.2" in name:
snake_case__ : int = name.replace("""upsample.2""" , """upsample.convolution_1""" )
snake_case__ : Dict = """upsample.""" + name
elif config.upsampler == "pixelshuffledirect":
snake_case__ : str = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
snake_case__ : Optional[Any] = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
else:
pass
else:
snake_case__ : Any = """swin2sr.""" + name
return name
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
for key in orig_state_dict.copy().keys():
snake_case__ : Tuple = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
snake_case__ : Optional[int] = key.split(""".""" )
snake_case__ : Tuple = int(key_split[1] )
snake_case__ : List[Any] = int(key_split[4] )
snake_case__ : List[Any] = config.embed_dim
if "weight" in key:
snake_case__ : Any = val[:dim, :]
snake_case__ : Dict = val[dim : dim * 2, :]
snake_case__ : Any = val[-dim:, :]
else:
snake_case__ : str = val[:dim]
snake_case__ : int = val[dim : dim * 2]
snake_case__ : List[Any] = val[-dim:]
pass
else:
snake_case__ : Tuple = val
return orig_state_dict
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
snake_case__ : Tuple = get_config(_lowerCAmelCase )
snake_case__ : List[Any] = SwinaSRForImageSuperResolution(_lowerCAmelCase )
model.eval()
snake_case__ : Union[str, Any] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )
snake_case__ : Optional[int] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
snake_case__ , snake_case__ : Union[str, Any] = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
raise ValueError("""Missing keys when converting: {}""".format(_lowerCAmelCase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict" )
# verify values
snake_case__ : Optional[Any] = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
snake_case__ : List[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ).convert("""RGB""" )
snake_case__ : Tuple = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
snake_case__ : List[Any] = 126 if """Jpeg""" in checkpoint_url else 256
snake_case__ : Tuple = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
snake_case__ : Union[str, Any] = transforms(_lowerCAmelCase ).unsqueeze(0 )
if config.num_channels == 1:
snake_case__ : List[Any] = pixel_values[:, 0, :, :].unsqueeze(1 )
snake_case__ : Optional[int] = model(_lowerCAmelCase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
snake_case__ : Optional[Any] = torch.Size([1, 3, 512, 512] )
snake_case__ : Any = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
snake_case__ : List[str] = torch.Size([1, 3, 1_024, 1_024] )
snake_case__ : Any = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
snake_case__ : List[Any] = torch.Size([1, 3, 1_024, 1_024] )
snake_case__ : Tuple = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
snake_case__ : Any = torch.Size([1, 3, 512, 512] )
snake_case__ : Dict = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
snake_case__ : Union[str, Any] = torch.Size([1, 3, 1_024, 1_024] )
snake_case__ : Optional[Any] = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _lowerCAmelCase , atol=1e-3 )
print("""Looks ok!""" )
snake_case__ : Optional[Any] = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
snake_case__ : Union[str, Any] = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
model.push_to_hub(f"caidas/{model_name}" )
processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
__a = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 35 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowercase (_UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase = ConsistencyModelPipeline
_UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
_UpperCamelCase = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
@property
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : List[Any] = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet''' , )
return unet
@property
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : List[str] = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , )
return unet
def UpperCamelCase__ ( self , A_=False ) ->Dict:
'''simple docstring'''
if class_cond:
__lowerCAmelCase : List[str] = self.dummy_cond_unet
else:
__lowerCAmelCase : Optional[Any] = self.dummy_uncond_unet
# Default to CM multistep sampler
__lowerCAmelCase : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__lowerCAmelCase : Dict = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase__ ( self , A_ , A_=0 ) ->Tuple:
'''simple docstring'''
if str(A_ ).startswith('''mps''' ):
__lowerCAmelCase : str = torch.manual_seed(A_ )
else:
__lowerCAmelCase : Dict = torch.Generator(device=A_ ).manual_seed(A_ )
__lowerCAmelCase : Tuple = {
'''batch_size''': 1,
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''generator''': generator,
'''output_type''': '''np''',
}
return inputs
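# Multistep sampling with the explicit two-timestep schedule from get_dummy_inputs.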
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase : Tuple = self.get_dummy_components()
__lowerCAmelCase : Union[str, Any] = ConsistencyModelPipeline(**A_ )
__lowerCAmelCase : List[str] = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__lowerCAmelCase : Any = self.get_dummy_inputs(A_ )
__lowerCAmelCase : int = pipe(**A_ ).images
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
__lowerCAmelCase : str = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
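# Same multistep check, but with the class-conditional UNet (class_labels=0).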
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase : str = self.get_dummy_components(class_cond=A_ )
__lowerCAmelCase : List[str] = ConsistencyModelPipeline(**A_ )
__lowerCAmelCase : List[Any] = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(A_ )
__lowerCAmelCase : Optional[int] = 0
__lowerCAmelCase : int = pipe(**A_ ).images
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
__lowerCAmelCase : List[str] = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
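# Onestep sampling: num_inference_steps=1 overrides the explicit timesteps.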
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase : Union[str, Any] = self.get_dummy_components()
__lowerCAmelCase : List[Any] = ConsistencyModelPipeline(**A_ )
__lowerCAmelCase : int = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__lowerCAmelCase : Tuple = self.get_dummy_inputs(A_ )
__lowerCAmelCase : Any = 1
__lowerCAmelCase : List[Any] = None
__lowerCAmelCase : Dict = pipe(**A_ ).images
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase : Any = image[0, -3:, -3:, -1]
__lowerCAmelCase : List[Any] = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
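# Onestep sampling with the class-conditional UNet.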
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase : Optional[Any] = self.get_dummy_components(class_cond=A_ )
__lowerCAmelCase : List[str] = ConsistencyModelPipeline(**A_ )
__lowerCAmelCase : Union[str, Any] = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__lowerCAmelCase : Any = self.get_dummy_inputs(A_ )
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : Dict = None
__lowerCAmelCase : Tuple = 0
__lowerCAmelCase : Dict = pipe(**A_ ).images
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
__lowerCAmelCase : Any = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self , A_=0 , A_=False , A_="cpu" , A_=torch.floataa , A_=(1, 3, 64, 64) ) ->str:
'''simple docstring'''
__lowerCAmelCase : Dict = torch.manual_seed(A_ )
__lowerCAmelCase : Tuple = {
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''class_labels''': 0,
'''generator''': generator,
'''output_type''': '''np''',
}
if get_fixed_latents:
__lowerCAmelCase : List[str] = self.get_fixed_latents(seed=A_ , device=A_ , dtype=A_ , shape=A_ )
__lowerCAmelCase : Union[str, Any] = latents
return inputs
def UpperCamelCase__ ( self , A_=0 , A_="cpu" , A_=torch.floataa , A_=(1, 3, 64, 64) ) ->Optional[int]:
'''simple docstring'''
if type(A_ ) == str:
__lowerCAmelCase : int = torch.device(A_ )
__lowerCAmelCase : Optional[Any] = torch.Generator(device=A_ ).manual_seed(A_ )
__lowerCAmelCase : Union[str, Any] = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ )
return latents
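# Multistep sampling against the full ImageNet-64 consistency model checkpoint.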
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : int = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__lowerCAmelCase : List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__lowerCAmelCase : List[str] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ )
pipe.to(torch_device=A_ )
pipe.set_progress_bar_config(disable=A_ )
__lowerCAmelCase : str = self.get_inputs()
__lowerCAmelCase : Any = pipe(**A_ ).images
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
__lowerCAmelCase : Optional[int] = np.array([0.0_888, 0.0_881, 0.0_666, 0.0_479, 0.0_292, 0.0_195, 0.0_201, 0.0_163, 0.0_254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
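# Onestep sampling against the full checkpoint.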
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : int = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__lowerCAmelCase : Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__lowerCAmelCase : List[str] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ )
pipe.to(torch_device=A_ )
pipe.set_progress_bar_config(disable=A_ )
__lowerCAmelCase : List[Any] = self.get_inputs()
__lowerCAmelCase : Tuple = 1
__lowerCAmelCase : Optional[Any] = None
__lowerCAmelCase : str = pipe(**A_ ).images
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
__lowerCAmelCase : List[Any] = np.array([0.0_340, 0.0_152, 0.0_063, 0.0_267, 0.0_221, 0.0_107, 0.0_416, 0.0_186, 0.0_217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
@require_torch_a
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Tuple = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__lowerCAmelCase : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__lowerCAmelCase : Union[str, Any] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ )
pipe.to(torch_device=A_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=A_ )
__lowerCAmelCase : Any = self.get_inputs(get_fixed_latents=A_ , device=A_ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=A_ , enable_math=A_ , enable_mem_efficient=A_ ):
__lowerCAmelCase : Dict = pipe(**A_ ).images
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
__lowerCAmelCase : Optional[int] = np.array([0.1_875, 0.1_428, 0.1_289, 0.2_151, 0.2_092, 0.1_477, 0.1_877, 0.1_641, 0.1_353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@require_torch_a
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__lowerCAmelCase : List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__lowerCAmelCase : Union[str, Any] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ )
pipe.to(torch_device=A_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=A_ )
__lowerCAmelCase : Union[str, Any] = self.get_inputs(get_fixed_latents=A_ , device=A_ )
__lowerCAmelCase : Any = 1
__lowerCAmelCase : int = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=A_ , enable_math=A_ , enable_mem_efficient=A_ ):
__lowerCAmelCase : int = pipe(**A_ ).images
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase : str = image[0, -3:, -3:, -1]
__lowerCAmelCase : Any = np.array([0.1_663, 0.1_948, 0.2_275, 0.1_680, 0.1_204, 0.1_245, 0.1_858, 0.1_338, 0.2_095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 275 | 0 |
"""simple docstring"""
import string
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> None:
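# Brute force: try every one of the 26 possible shifts and print each candidate plaintext.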
for key in range(len(string.ascii_uppercase ) ):
_lowerCAmelCase : List[Any] = """"""
for symbol in message:
if symbol in string.ascii_uppercase:
_lowerCAmelCase : List[Any] = string.ascii_uppercase.find(_lowerCamelCase )
_lowerCAmelCase : List[str] = num - key
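# Wrap around the alphabet when the shifted index goes negative.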
if num < 0:
_lowerCAmelCase : int = num + len(string.ascii_uppercase )
_lowerCAmelCase : List[str] = translated + string.ascii_uppercase[num]
else:
_lowerCAmelCase : int = translated + symbol
print(f"Decryption using Key #{key}: {translated}" )
def SCREAMING_SNAKE_CASE ( ) -> None:
_lowerCAmelCase : int = input("""Encrypted message: """ )
_lowerCAmelCase : Dict = message.upper()
decrypt(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 126 | """simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : Any ,_lowerCamelCase : Optional[Any] ) -> str:
_lowerCAmelCase : str = AutoConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : int = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase )
_lowerCAmelCase : Any = checkpoints.load_tax_checkpoint(_lowerCamelCase )
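# T5 v1.1 and LongT5 checkpoints use a gated MLP with two input projections (wi_0/wi_1),
# while the original T5 has a single wi matrix.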
_lowerCAmelCase : Tuple = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""]
if config.model_type == "t5":
_lowerCAmelCase : Tuple = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_lowerCAmelCase : Optional[Any] = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Union[str, Any] = """TransientGlobalSelfAttention"""
else:
raise ValueError(
"""Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"""
""" attribute with a value from ['local', 'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
_lowerCAmelCase : Tuple = f"layers_{str(_lowerCamelCase )}"
# Self-Attention
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_lowerCAmelCase : int = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_lowerCAmelCase : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_lowerCAmelCase : Any = flax_model.params["""encoder"""]["""block"""][str(_lowerCamelCase )]["""layer"""]
_lowerCAmelCase : Any = tax_attention_key
_lowerCAmelCase : str = tax_attention_out
_lowerCAmelCase : Union[str, Any] = tax_attention_query
_lowerCAmelCase : Optional[Any] = tax_attention_value
_lowerCAmelCase : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Any = tax_global_layer_norm
if split_mlp_wi:
_lowerCAmelCase : Dict = tax_mlp_wi_a
_lowerCAmelCase : List[Any] = tax_mlp_wi_a
else:
_lowerCAmelCase : List[str] = tax_mlp_wi
_lowerCAmelCase : str = tax_mlp_wo
_lowerCAmelCase : Optional[Any] = tax_mlp_layer_norm
_lowerCAmelCase : Any = flax_model_encoder_layer_block
# Only for layer 0:
_lowerCAmelCase : Union[str, Any] = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Optional[Any] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Optional[int] = tax_encoder_global_rel_embedding
# Assigning
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
_lowerCAmelCase : Any = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_lowerCAmelCase : Optional[int] = f"layers_{str(_lowerCamelCase )}"
# Self-Attention
_lowerCAmelCase : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
_lowerCAmelCase : int = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
_lowerCAmelCase : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Optional[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
_lowerCAmelCase : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
_lowerCAmelCase : List[str] = tax_enc_dec_attention_module["""key"""]["""kernel"""]
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_module["""out"""]["""kernel"""]
_lowerCAmelCase : List[str] = tax_enc_dec_attention_module["""query"""]["""kernel"""]
_lowerCAmelCase : Dict = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_lowerCAmelCase : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_lowerCAmelCase : str = flax_model.params["""decoder"""]["""block"""][str(_lowerCamelCase )]["""layer"""]
_lowerCAmelCase : int = tax_attention_key
_lowerCAmelCase : List[str] = tax_attention_out
_lowerCAmelCase : Optional[Any] = tax_attention_query
_lowerCAmelCase : Dict = tax_attention_value
_lowerCAmelCase : str = tax_pre_attention_layer_norm
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_key
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_out
_lowerCAmelCase : Tuple = tax_enc_dec_attention_query
_lowerCAmelCase : Any = tax_enc_dec_attention_value
_lowerCAmelCase : Dict = tax_cross_layer_norm
if split_mlp_wi:
_lowerCAmelCase : Dict = tax_mlp_wi_a
_lowerCAmelCase : int = tax_mlp_wi_a
else:
_lowerCAmelCase : Optional[int] = tax_mlp_wi
_lowerCAmelCase : Dict = tax_mlp_wo
_lowerCAmelCase : List[Any] = txa_mlp_layer_norm
_lowerCAmelCase : Optional[Any] = flax_model_decoder_layer_block
# Decoder Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
_lowerCAmelCase : List[str] = txa_decoder_norm
# Only for layer 0:
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Union[str, Any] = tax_decoder_rel_embedding
# Token Embeddings
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
_lowerCAmelCase : Optional[int] = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_lowerCAmelCase : Tuple = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(_lowerCamelCase )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
_a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
_a : List[str] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 126 | 1 |