code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
if isinstance(_UpperCamelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class __snake_case :
def __a ( self : str , _lowercase : Union[str, Any] , _lowercase : Tuple ):
"""simple docstring"""
pass
def __a ( self : Optional[Any] ):
"""simple docstring"""
pass
def __a ( self : Union[str, Any] ):
"""simple docstring"""
pass
def __a ( self : Any , _lowercase : List[str] , _lowercase : int , _lowercase : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = np.abs((a - b) ).max()
self.assertLessEqual(_lowercase , _lowercase , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def __a ( self : Union[str, Any] , _lowercase : Optional[int] , _lowercase : Union[str, Any] , _lowercase : int , _lowercase : List[str] , _lowercase : Tuple=None , **_lowercase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderConfig.from_vision_text_configs(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ = FlaxVisionTextDualEncoderModel(_lowercase )
SCREAMING_SNAKE_CASE__ = model(input_ids=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def __a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Optional[int] , _lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : Optional[int]=None , **_lowercase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.get_vision_text_model(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ = {"""vision_model""": vision_model, """text_model""": text_model}
SCREAMING_SNAKE_CASE__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowercase )
SCREAMING_SNAKE_CASE__ = model(input_ids=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __a ( self : Dict , _lowercase : int , _lowercase : Optional[Any] , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : Dict=None , **_lowercase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.get_vision_text_model(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ = {"""vision_model""": vision_model, """text_model""": text_model}
SCREAMING_SNAKE_CASE__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowercase )
SCREAMING_SNAKE_CASE__ = model(input_ids=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase )
SCREAMING_SNAKE_CASE__ = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ = FlaxVisionTextDualEncoderModel.from_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ = model(input_ids=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase )
SCREAMING_SNAKE_CASE__ = after_output[0]
SCREAMING_SNAKE_CASE__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowercase , 1E-3 )
def __a ( self : str , _lowercase : Tuple , _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : Union[str, Any] , _lowercase : str=None , **_lowercase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.get_vision_text_model(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ = {"""vision_model""": vision_model, """text_model""": text_model}
SCREAMING_SNAKE_CASE__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowercase )
SCREAMING_SNAKE_CASE__ = model(
input_ids=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase , output_attentions=_lowercase )
SCREAMING_SNAKE_CASE__ = output.vision_model_output.attentions
self.assertEqual(len(_lowercase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE__ = to_atuple(vision_model.config.image_size )
SCREAMING_SNAKE_CASE__ = to_atuple(vision_model.config.patch_size )
SCREAMING_SNAKE_CASE__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
SCREAMING_SNAKE_CASE__ = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
SCREAMING_SNAKE_CASE__ = output.text_model_output.attentions
self.assertEqual(len(_lowercase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __a ( self : str , _lowercase : Dict , _lowercase : Dict , _lowercase : str ):
"""simple docstring"""
pt_model.to(_lowercase )
pt_model.eval()
# prepare inputs
SCREAMING_SNAKE_CASE__ = inputs_dict
SCREAMING_SNAKE_CASE__ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = pt_model(**_lowercase ).to_tuple()
SCREAMING_SNAKE_CASE__ = fx_model(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_lowercase , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ = FlaxVisionTextDualEncoderModel.from_pretrained(_lowercase , from_pt=_lowercase )
SCREAMING_SNAKE_CASE__ = fx_model_loaded(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_lowercase , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderModel.from_pretrained(_lowercase , from_flax=_lowercase )
pt_model_loaded.to(_lowercase )
pt_model_loaded.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = pt_model_loaded(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(_lowercase , pt_output_loaded.numpy() , 4E-2 )
def __a ( self : Optional[int] , _lowercase : Tuple , _lowercase : Any , _lowercase : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderConfig.from_vision_text_configs(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderModel(_lowercase )
SCREAMING_SNAKE_CASE__ = FlaxVisionTextDualEncoderModel(_lowercase )
SCREAMING_SNAKE_CASE__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _lowercase )
SCREAMING_SNAKE_CASE__ = fx_state
self.check_pt_flax_equivalence(_lowercase , _lowercase , _lowercase )
def __a ( self : List[Any] , _lowercase : str , _lowercase : str , _lowercase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderConfig.from_vision_text_configs(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderModel(_lowercase )
SCREAMING_SNAKE_CASE__ = FlaxVisionTextDualEncoderModel(_lowercase )
SCREAMING_SNAKE_CASE__ = load_flax_weights_in_pytorch_model(_lowercase , fx_model.params )
self.check_pt_flax_equivalence(_lowercase , _lowercase , _lowercase )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowercase )
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowercase )
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
self.check_save_load(**_lowercase )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowercase )
@is_pt_flax_cross_test
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ = config_inputs_dict.pop("""vision_config""" )
SCREAMING_SNAKE_CASE__ = config_inputs_dict.pop("""text_config""" )
SCREAMING_SNAKE_CASE__ = config_inputs_dict
self.check_equivalence_pt_to_flax(_lowercase , _lowercase , _lowercase )
self.check_equivalence_flax_to_pt(_lowercase , _lowercase , _lowercase )
@slow
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.get_pretrained_model_and_inputs()
SCREAMING_SNAKE_CASE__ = model_a(**_lowercase )
SCREAMING_SNAKE_CASE__ = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ = FlaxVisionTextDualEncoderModel.from_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ = model_a(**_lowercase )
SCREAMING_SNAKE_CASE__ = after_outputs[0]
SCREAMING_SNAKE_CASE__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowercase , 1E-5 )
@require_flax
class __snake_case ( lowerCamelCase_ , unittest.TestCase ):
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=_lowercase , text_from_pt=_lowercase , )
SCREAMING_SNAKE_CASE__ = 13
SCREAMING_SNAKE_CASE__ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
SCREAMING_SNAKE_CASE__ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
SCREAMING_SNAKE_CASE__ = random_attention_mask([batch_size, 4] )
SCREAMING_SNAKE_CASE__ = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def __a ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxViTModel(_lowercase )
SCREAMING_SNAKE_CASE__ = FlaxBertModel(_lowercase )
return vision_model, text_model
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxViTModelTester(self )
SCREAMING_SNAKE_CASE__ = FlaxBertModelTester(self )
SCREAMING_SNAKE_CASE__ = vit_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ = bert_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = vision_config_and_inputs
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __snake_case ( lowerCamelCase_ , unittest.TestCase ):
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=_lowercase , text_from_pt=_lowercase , )
SCREAMING_SNAKE_CASE__ = 13
SCREAMING_SNAKE_CASE__ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
SCREAMING_SNAKE_CASE__ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
SCREAMING_SNAKE_CASE__ = random_attention_mask([batch_size, 4] )
SCREAMING_SNAKE_CASE__ = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def __a ( self : int , _lowercase : Optional[int] , _lowercase : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxCLIPVisionModel(_lowercase )
SCREAMING_SNAKE_CASE__ = FlaxBertModel(_lowercase )
return vision_model, text_model
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxCLIPVisionModelTester(self )
SCREAMING_SNAKE_CASE__ = FlaxBertModelTester(self )
SCREAMING_SNAKE_CASE__ = clip_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ = bert_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = vision_config_and_inputs
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __snake_case ( unittest.TestCase ):
@slow
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
SCREAMING_SNAKE_CASE__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
SCREAMING_SNAKE_CASE__ = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowercase , padding=_lowercase , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ = model(**_lowercase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
SCREAMING_SNAKE_CASE__ = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image , _lowercase , atol=1E-3 ) )
| 219 | """simple docstring"""
import argparse
import copy
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] ) -> str:
"""simple docstring"""
snake_case = {}
with open(_UpperCamelCase ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
snake_case = []
_list.append([line.split()[1], line.split()[2]] )
snake_case = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
snake_case = []
_list.append([line.split()[0], line.split()[2]] )
snake_case = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
with open(_UpperCamelCase ) as f:
snake_case = f.read(1 )
snake_case = start_node
snake_case = []
snake_case = start_node
snake_case = 0
while visiting not in first_solution:
snake_case = 1_0_0_0_0
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(_UpperCamelCase ) and k[0] not in first_solution:
snake_case = k[1]
snake_case = k[0]
first_solution.append(_UpperCamelCase )
snake_case = distance_of_first_solution + int(_UpperCamelCase )
snake_case = best_node
first_solution.append(_UpperCamelCase )
snake_case = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
snake_case = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0_0_0_0
)
return first_solution, distance_of_first_solution
def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
snake_case = []
for n in solution[1:-1]:
snake_case = solution.index(_UpperCamelCase )
for kn in solution[1:-1]:
snake_case = solution.index(_UpperCamelCase )
if n == kn:
continue
snake_case = copy.deepcopy(_UpperCamelCase )
snake_case = kn
snake_case = n
snake_case = 0
for k in _tmp[:-1]:
snake_case = _tmp[_tmp.index(_UpperCamelCase ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
snake_case = distance + int(i[1] )
_tmp.append(_UpperCamelCase )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
snake_case = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda _UpperCamelCase : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def lowerCAmelCase__ ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : str ) -> Union[str, Any]:
"""simple docstring"""
snake_case = 1
snake_case = first_solution
snake_case = []
snake_case = distance_of_first_solution
snake_case = solution
while count <= iters:
snake_case = find_neighborhood(_UpperCamelCase , _UpperCamelCase )
snake_case = 0
snake_case = neighborhood[index_of_best_solution]
snake_case = len(_UpperCamelCase ) - 1
snake_case = False
while not found:
snake_case = 0
while i < len(_UpperCamelCase ):
if best_solution[i] != solution[i]:
snake_case = best_solution[i]
snake_case = solution[i]
break
snake_case = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
snake_case = True
snake_case = best_solution[:-1]
snake_case = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
snake_case = cost
snake_case = solution
else:
snake_case = index_of_best_solution + 1
snake_case = neighborhood[index_of_best_solution]
if len(_UpperCamelCase ) >= size:
tabu_list.pop(0 )
snake_case = count + 1
return best_solution_ever, best_cost
def lowerCAmelCase__ ( _UpperCamelCase : int=None ) -> List[str]:
"""simple docstring"""
snake_case = generate_neighbours(args.File )
snake_case ,snake_case = generate_first_solution(
args.File , _UpperCamelCase )
snake_case ,snake_case = tabu_search(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , args.Iterations , args.Size , )
print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 150 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
class a__ ( lowerCamelCase_ ):
_SCREAMING_SNAKE_CASE : Tuple = 'encoder-decoder'
_SCREAMING_SNAKE_CASE : List[str] = True
def __init__( self , **_UpperCamelCase ):
"""simple docstring"""
super().__init__(**_UpperCamelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
_lowercase : Optional[int] = kwargs.pop("encoder" )
_lowercase : Union[str, Any] = encoder_config.pop("model_type" )
_lowercase : Union[str, Any] = kwargs.pop("decoder" )
_lowercase : Dict = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_lowercase : List[Any] = AutoConfig.for_model(_UpperCamelCase , **_UpperCamelCase )
_lowercase : int = AutoConfig.for_model(_UpperCamelCase , **_UpperCamelCase )
_lowercase : Optional[int] = True
@classmethod
def _lowerCamelCase ( cls , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
_lowercase : Dict = True
_lowercase : Union[str, Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowercase : str = self.encoder.to_dict()
_lowercase : Any = self.decoder.to_dict()
_lowercase : Union[str, Any] = self.__class__.model_type
return output
| 359 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_snake_case = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class a__ ( unittest.TestCase ):
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = True , ):
"""simple docstring"""
_lowercase : str = [file for file in os.listdir(_UpperCamelCase ) if os.path.isfile(os.path.join(_UpperCamelCase , _UpperCamelCase ) )]
if identifier is not None:
_lowercase : str = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_UpperCamelCase , _UpperCamelCase ):
for n_ in n_identifier:
_lowercase : Dict = [file for file in files if n_ not in file]
else:
_lowercase : Optional[Any] = [file for file in files if n_identifier not in file]
_lowercase : Dict = ignore_files or []
ignore_files.append("__init__.py" )
_lowercase : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , _UpperCamelCase )
if only_modules:
_lowercase : Optional[Any] = file.split("." )[0]
try:
_lowercase : Union[str, Any] = getattr(_UpperCamelCase , _UpperCamelCase )
_lowercase : Optional[int] = doctest.DocTestSuite(_UpperCamelCase )
_lowercase : Tuple = unittest.TextTestRunner().run(_UpperCamelCase )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
_lowercase : Optional[Any] = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Tuple = Path("src/transformers" )
_lowercase : str = "modeling"
_lowercase : Tuple = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(_UpperCamelCase , identifier=_UpperCamelCase , ignore_files=_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[int] = Path("src/transformers" )
_lowercase : Any = "tokenization"
self.analyze_directory(_UpperCamelCase , identifier=_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Tuple = Path("src/transformers" )
_lowercase : Optional[Any] = "configuration"
self.analyze_directory(_UpperCamelCase , identifier=_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = Path("src/transformers" )
_lowercase : List[Any] = ["configuration", "modeling", "tokenization"]
self.analyze_directory(_UpperCamelCase , n_identifier=_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Dict = Path("docs/source" )
_lowercase : int = ["favicon.ico"]
self.analyze_directory(_UpperCamelCase , ignore_files=_UpperCamelCase , only_modules=_UpperCamelCase )
| 199 | 0 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
UpperCAmelCase_ : List[str] = 'scheduler_config.json'
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : Optional[Any] = 1
snake_case__ : Tuple = 2
snake_case__ : List[str] = 3
snake_case__ : int = 4
snake_case__ : str = 5
snake_case__ : Tuple = 6
snake_case__ : Optional[Any] = 7
snake_case__ : Optional[Any] = 8
snake_case__ : Optional[Any] = 9
snake_case__ : Tuple = 10
snake_case__ : Union[str, Any] = 11
snake_case__ : List[Any] = 12
snake_case__ : Optional[int] = 13
snake_case__ : List[str] = 14
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : torch.FloatTensor
class SCREAMING_SNAKE_CASE__ :
snake_case__ : Optional[Any] = SCHEDULER_CONFIG_NAME
snake_case__ : Union[str, Any] = []
snake_case__ : str = True
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict[str, Any] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : List[Any]=False , **SCREAMING_SNAKE_CASE__ : Optional[int] , ) -> Optional[int]:
a_ , a_ , a_ : Optional[Any] = cls.load_config(
pretrained_model_name_or_path=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , return_unused_kwargs=SCREAMING_SNAKE_CASE__ , return_commit_hash=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
return cls.from_config(SCREAMING_SNAKE_CASE__ , return_unused_kwargs=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , SCREAMING_SNAKE_CASE__ : bool = False , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
self.save_config(save_directory=SCREAMING_SNAKE_CASE__ , push_to_hub=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
return self._get_compatibles()
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Dict ) -> Optional[Any]:
a_ : int = list(set([cls.__name__] + cls._compatibles ) )
a_ : Tuple = importlib.import_module(__name__.split('.' )[0] )
a_ : List[str] = [
getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for c in compatible_classes_str if hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
]
return compatible_classes
| 32 |
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : str = 'T5Config'
def SCREAMING_SNAKE_CASE_ ( __A : jnp.array , __A : int , __A : int ) -> jnp.ndarray:
"""simple docstring"""
a_ : Dict = jnp.zeros_like(__A )
a_ : Dict = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
a_ : str = shifted_input_ids.at[:, 0].set(__A )
a_ : int = jnp.where(shifted_input_ids == -1_00 , __A , __A )
return shifted_input_ids
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : str = '''mt5'''
snake_case__ : List[Any] = MTaConfig
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : str = '''mt5'''
snake_case__ : List[str] = MTaConfig
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : Any = '''mt5'''
snake_case__ : Union[str, Any] = MTaConfig
| 32 | 1 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
__UpperCAmelCase : List[str] = (3, 9, -11, 0, 7, 5, 1, -1)
__UpperCAmelCase : List[str] = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
__UpperCamelCase : int
__UpperCamelCase : Node | None
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Node | None = None
for i in sorted(__SCREAMING_SNAKE_CASE , reverse=__SCREAMING_SNAKE_CASE ):
UpperCamelCase : int = Node(__SCREAMING_SNAKE_CASE , self.head )
def __iter__( self ):
"""simple docstring"""
UpperCamelCase : str = self.head
while node:
yield node.data
UpperCamelCase : Tuple = node.next_node
def __len__( self ):
"""simple docstring"""
return sum(1 for _ in self )
def __str__( self ):
"""simple docstring"""
return " -> ".join([str(__SCREAMING_SNAKE_CASE ) for node in self] )
def a ( SCREAMING_SNAKE_CASE_ : SortedLinkedList , SCREAMING_SNAKE_CASE_ : SortedLinkedList ):
"""simple docstring"""
return SortedLinkedList(list(SCREAMING_SNAKE_CASE_ ) + list(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase : List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 315 |
import collections
import os
import re
from pathlib import Path
__UpperCAmelCase : List[str] = "src/transformers"
# Matches is_xxx_available()
__UpperCAmelCase : int = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
__UpperCAmelCase : Optional[int] = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__UpperCAmelCase : List[Any] = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
__UpperCAmelCase : List[Any] = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
__UpperCAmelCase : str = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__UpperCAmelCase : Union[str, Any] = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
__UpperCAmelCase : Dict = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
__UpperCAmelCase : str = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
__UpperCAmelCase : str = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
__UpperCAmelCase : Any = re.compile(r"^\s*try:")
# Catches a line with else:
__UpperCAmelCase : List[Any] = re.compile(r"^\s*else:")
def a ( SCREAMING_SNAKE_CASE_ : Dict ):
"""simple docstring"""
if _re_test_backend.search(SCREAMING_SNAKE_CASE_ ) is None:
return None
UpperCamelCase : Union[str, Any] = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE_ )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE_ )
def parse_init(init_file):
    """Parse one `__init__.py`.

    Returns a pair of dicts mapping backend name ("none" for no backend) to the
    objects declared, first in `_import_structure`, then in the TYPE_CHECKING
    section — or None if the file is a traditional (non-lazy) init.
    """
    with open(init_file, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith('''_import_structure = {'''):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(R'''\[([^\]]+)\]''', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(''', ''')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''') if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(''' ''' * 8 + '''"'''):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {'''none''': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING'''):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(''' ''' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(''', ''')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(''', ''')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(''' ''' * 8 + '''"'''):
                    objects.append(line[9:-3])
                elif line.startswith(''' ''' * 1_2 + '''"'''):
                    objects.append(line[1_3:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith('''else''')
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', '''))
        elif line.startswith(''' ''' * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {'''none''': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(''' ''' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', '''))
                elif line.startswith(''' ''' * 1_2):
                    objects.append(line[1_2:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of a lazy init.

    Returns a list of human-readable error strings: mismatched backend keys,
    duplicate declarations on either side, and objects present in only one half.
    An empty list means the init is consistent.
    """

    def find_duplicates(seq):
        # Objects listed more than once on one side.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = '''base imports''' if key == '''none''' else F"""{key} backend"""
            errors.append(F"""Differences for {name}:""")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F"""  {a} in TYPE_HINT but not in _import_structure.""")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F"""  {a} in _import_structure but not in TYPE_HINT.""")
    return errors
def check_all_inits():
    """Walk PATH_TO_TRANSFORMERS, parse every `__init__.py`, and raise a ValueError
    aggregating all inits whose `_import_structure` and TYPE_CHECKING halves disagree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, '''__init__.py''')
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file for readability.
                    errors[0] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
                    failures.append('''\n'''.join(errors))
    if len(failures) > 0:
        raise ValueError('''\n\n'''.join(failures))
def get_transformers_submodules():
    """Return the dotted names of all Transformers submodules under PATH_TO_TRANSFORMERS."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_'''):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob('''*.py'''))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, '''.''')
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace('''.py''', '''''').replace(os.path.sep, '''.''')
            # Only keep top-level .py files here; nested ones are covered by folders.
            if len(submodule.split('''.''')) == 1:
                submodules.append(submodule)
    return submodules
# Submodules that are intentionally not registered in the main init.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Verify every submodule appears in the keys of the main `_import_structure`."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''), '''r''') as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''', init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = '''\n'''.join(F"""- {module}""" for module in module_not_registered)
        raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
            F"""{list_of_modules}\n"""
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''')
if __name__ == "__main__":
    check_all_inits()
    check_submodules()
| 315 | 1 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps fairseq/unilm WavLM parameter-name fragments to their HF counterparts.
# A "*" in the target is replaced with the encoder layer index at load time.
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
    'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
    'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'ctc_proj',
    'mask_emb': 'masked_spec_embed',
}
# HF keys that live at the model's top level (not nested under the encoder).
TOP_LEVEL_KEYS = [
    'ctc_proj',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : str ) -> List[str]:
for attribute in key.split(""".""" ):
_lowerCAmelCase : Tuple = getattr(_lowerCamelCase ,_lowerCamelCase )
if weight_type is not None:
_lowerCAmelCase : int = getattr(_lowerCamelCase ,_lowerCamelCase ).shape
else:
_lowerCAmelCase : str = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
_lowerCAmelCase : Optional[Any] = value
elif weight_type == "weight_g":
_lowerCAmelCase : str = value
elif weight_type == "weight_v":
_lowerCAmelCase : Union[str, Any] = value
elif weight_type == "bias":
_lowerCAmelCase : Optional[int] = value
else:
_lowerCAmelCase : List[Any] = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights(fairseq_model, hf_model):
    """Copy all weights from a fairseq WavLM model into the HF model in place.

    Conv feature-extractor weights go through load_conv_layer; everything else is
    routed via MAPPING and set_recursively. Unmatched weights are logged.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == """group""",)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Layer index sits just before the matched fragment.
                        layer_index = name.split(key)[0].split(""".""")[-2]
                        mapped_key = mapped_key.replace("""*""", layer_index)
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor weight into the HF feature extractor.

    type_id 0 entries are the conv weights/biases; type_id 2 entries are the layer
    norms (only layer 0 when group norm is used). Anything else is recorded in
    `unused_weights`.
    """
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Convert a fairseq/unilm WavLM checkpoint to an HF WavLMModel and save it.

    Args:
        checkpoint_path: path to the original fairseq checkpoint (.pt).
        pytorch_dump_folder_path: output directory for the HF model.
        config_path: optional path to an existing HF config.json.
    """
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["""cfg"""])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["""model"""])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 44 | """simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    """Validate a model-parallel device map.

    Raises ValueError if any attention block (0..num_blocks-1) is assigned to more
    than one device, is missing from the map, or if the map references blocks the
    model does not have. Returns None on success.
    """
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
            """ These attention blocks were specified more than once: """ + str(duplicate_blocks))
    if len(missing_blocks) != 0:
        raise ValueError(
            """There are attention blocks for this model that are not specified in the device_map. Add these attention """
            """blocks to a device on the device_map: """ + str(missing_blocks))
    if len(extra_blocks) != 0:
        raise ValueError(
            """The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
            + str(extra_blocks))
def get_device_map(n_layers, devices):
    """Return a device map spreading `n_layers` layer indices evenly over `devices`.

    Each device receives a contiguous chunk of ceil(n_layers / len(devices)) layers.
    """
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
| 44 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
# Configure transformers logging for this conversion script.
logging.set_verbosity_info()
__A =logging.get_logger(__name__)  # module logger (not referenced in the visible code below)
def create_rename_keys(encoder_config, decoder_config):
    """Build the (old_name, new_name) pairs mapping original TrOCR DeiT-encoder
    weight names to the HF ViT encoder names. `decoder_config` is unused here but
    kept for signature compatibility with the call site."""
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f'''encoder.deit.blocks.{i}.norm1.weight''', f'''encoder.encoder.layer.{i}.layernorm_before.weight'''))
        rename_keys.append((f'''encoder.deit.blocks.{i}.norm1.bias''', f'''encoder.encoder.layer.{i}.layernorm_before.bias'''))
        rename_keys.append(
            (f'''encoder.deit.blocks.{i}.attn.proj.weight''', f'''encoder.encoder.layer.{i}.attention.output.dense.weight'''))
        rename_keys.append(
            (f'''encoder.deit.blocks.{i}.attn.proj.bias''', f'''encoder.encoder.layer.{i}.attention.output.dense.bias'''))
        rename_keys.append(
            (f'''encoder.deit.blocks.{i}.norm2.weight''', f'''encoder.encoder.layer.{i}.layernorm_after.weight'''))
        rename_keys.append((f'''encoder.deit.blocks.{i}.norm2.bias''', f'''encoder.encoder.layer.{i}.layernorm_after.bias'''))
        rename_keys.append(
            (f'''encoder.deit.blocks.{i}.mlp.fc1.weight''', f'''encoder.encoder.layer.{i}.intermediate.dense.weight'''))
        rename_keys.append(
            (f'''encoder.deit.blocks.{i}.mlp.fc1.bias''', f'''encoder.encoder.layer.{i}.intermediate.dense.bias'''))
        rename_keys.append(
            (f'''encoder.deit.blocks.{i}.mlp.fc2.weight''', f'''encoder.encoder.layer.{i}.output.dense.weight'''))
        rename_keys.append((f'''encoder.deit.blocks.{i}.mlp.fc2.bias''', f'''encoder.encoder.layer.{i}.output.dense.bias'''))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
            ("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
            ("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
            ("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
            ("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
            ("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
        ])
    return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    """Split each fused qkv projection weight into separate HF query/key/value entries.

    Mutates `state_dict` in place: pops `encoder.deit.blocks.{i}.attn.qkv.weight`
    and stores the three hidden_size-row slices under the HF attention key names.
    """
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'''encoder.deit.blocks.{i}.attn.qkv.weight''')

        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    """Move the entry stored under key `old` of `dct` to key `new` (in place)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    """Download a sample image matching the checkpoint's domain.

    Handwritten checkpoints get an IAM sample; printed/stage1 checkpoints get a
    SROIE receipt. NOTE(review): `url` is unbound if the checkpoint URL matches
    neither branch — upstream relies on the caller passing a known URL.
    """
    if "handwritten" in checkpoint_url:
        url = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"""  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
    im = Image.open(requests.get(url, stream=True).raw).convert("""RGB""")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Convert an original TrOCR checkpoint (fairseq format) to an HF
    VisionEncoderDecoderModel, verify its logits on a sample image, and save it."""
    # Encoder: ViT with 384px input and no qkv bias (as in the original DeiT encoder).
    encoder_config = ViTConfig(image_size=3_8_4, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 7_6_8
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1_0_2_4
        encoder_config.intermediate_size = 4_0_9_6
        encoder_config.num_hidden_layers = 2_4
        encoder_config.num_attention_heads = 1_6
        decoder_config.encoder_hidden_size = 1_0_2_4
    else:
        raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = """relu"""
        decoder_config.max_position_embeddings = 1_0_2_4
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""", check_hash=False)["""model"""]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("""decoder""") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("""roberta-large""")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="""pt""").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 5_0_2_6_5])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11])
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70])
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10])
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35])

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :1_0], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving processor to {pytorch_dump_folder_path}''')
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_url',
        default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt',
        type=str,
        help='URL to the original PyTorch checkpoint (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""Lazy-import init for the M2M-100 model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Maps submodule name -> public objects; consumed by _LazyModule below.
_import_structure = {
    'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
    'tokenization_m2m_100': ['M2M100Tokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_m2m_100'] = [
        'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
        'M2M100ForConditionalGeneration',
        'M2M100Model',
        'M2M100PreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
    """Integration test: run the DiT RVL-CDIP document-classification checkpoint on a sample image."""

    @slow
    def snake_case ( self : int ):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
        model.to(torch_device )

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo" )

        image = dataset["train"][0]["image"].convert("RGB" )
        inputs = image_processor(image , return_tensors="pt" ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits

        # RVL-CDIP has 16 document classes.
        expected_shape = torch.Size((1, 16) )
        self.assertEqual(logits.shape , expected_shape )

        expected_slice = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
| 130 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
# Configure transformers logging for this conversion script.
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger()  # module logger (not referenced in the visible code below)
def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    """Port one pretrained timm LeViT checkpoint into the HF implementation.

    Copies the timm state dict key-by-key (the two models enumerate parameters in
    the same order), checks the logits match on random input, then saves the model
    and an image processor under `save_directory / name`.
    """
    print(F"""Converting {name}...""")

    with torch.no_grad():
        # Pick the matching pretrained timm model for this hidden size / variant.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()

        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # Positional copy: both state dicts are assumed to enumerate the same
        # parameters in the same order — TODO confirm for new timm versions.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        from_model_logits = from_model(x)
        our_model_logits = our_model(x).logits

        assert torch.allclose(from_model_logits, our_model_logits), "The model logits don't match the original one."

        checkpoint_name = name
        print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(F"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    """Convert one (or every) LeViT checkpoint and optionally push it.

    Fixes applied (review): duplicate ``lowerCamelCase__`` parameters were a
    SyntaxError; the id2label comprehension keyed on the wrong name; ``config``
    was unbound on the single-model branch, so the final return raised
    UnboundLocalError whenever ``model_name`` was given.

    Args:
        save_directory: ``pathlib.Path`` converted checkpoints are written under.
        model_name: specific checkpoint to convert (e.g. "levit-192");
            when None, every known LeViT variant is converted.
        push_to_hub: forwarded to ``convert_weight_and_push``.

    Returns:
        ``(config, expected_shape)`` for the (last) converted model.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    # Load the ImageNet-1k id -> label mapping from the Hub; JSON keys are
    # strings, so cast them back to int.
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Partial that bakes the classification head metadata into every config.
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }
    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1),
    }
    if model_name:
        # Bind config here so the return below is always defined.
        config = names_to_config[model_name]
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    # CLI entry point.  The mangled original assigned the parser/args to dead
    # names and then referenced ``parser``/``args``/``pytorch_dump_folder_path``,
    # which raised NameError on execution; the bindings are restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    # Create the dump folder up front so save_pretrained never fails on a
    # missing directory.
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 130 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list[int]:
    """Return a longest non-decreasing subsequence of *lowercase*.

    Fixes applied (review): the original recursed through the name
    ``longest_subsequence``, which no longer exists in this module (the function
    was renamed), so any input of length > 1 raised NameError — and the
    algorithm itself was exponential-time.  This version is an iterative
    patience-sorting implementation: O(n log n) time, O(n) space, no recursion.

    Args:
        lowercase: sequence of comparable elements.

    Returns:
        A new list holding one longest non-decreasing subsequence
        (ties between equally long subsequences are broken arbitrarily).

    >>> SCREAMING_SNAKE_CASE__([10, 22, 9, 33, 21, 50, 41, 60, 80])
    [10, 22, 33, 41, 60, 80]
    >>> SCREAMING_SNAKE_CASE__([])
    []
    """
    from bisect import bisect_right

    sequence = list(lowercase)
    if len(sequence) <= 1:
        return sequence

    tail_values: list = []          # smallest tail value of a subsequence of each length
    tail_index: list[int] = []      # index in `sequence` of that tail
    parent = [-1] * len(sequence)   # predecessor index, for reconstruction

    for i, value in enumerate(sequence):
        # bisect_right keeps equal elements, i.e. the subsequence is
        # non-decreasing (>=), matching the original's `element >= pivot` test.
        pos = bisect_right(tail_values, value)
        if pos == len(tail_values):
            tail_values.append(value)
            tail_index.append(i)
        else:
            tail_values[pos] = value
            tail_index[pos] = i
        parent[i] = tail_index[pos - 1] if pos > 0 else -1

    # Walk the parent chain back from the tail of the longest subsequence.
    out: list = []
    i = tail_index[-1]
    while i != -1:
        out.append(sequence[i])
        i = parent[i]
    return out[::-1]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 176 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCamelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase : Dict = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... 
prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase=8 ) -> Optional[int]:
snake_case : Dict = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
snake_case : int = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class __lowercase (UpperCamelCase__ ):
    """Kandinsky 2.2 ControlNet-style decoder pipeline (UNet + scheduler + MoVQ).

    NOTE(review): this block is machine-mangled.  Several signatures repeat the
    parameter name ``A`` (a SyntaxError as written), the decorator argument at
    ``@replace_example_docstring(A)`` is undefined at class scope, and locals
    are bound to throwaway names (``snake_case``) that later lines read under
    other names.  The comments below describe apparent intent only — confirm
    against the original diffusers KandinskyV22ControlnetPipeline.
    """

    def __init__( self , A , A , A , ) -> int:
        # Registers the unet, scheduler and movq submodules on the pipeline.
        super().__init__()
        self.register_modules(
            unet=A , scheduler=A , movq=A , )
        # Apparent intent: movq_scale_factor = 2 ** (num_blocks - 1); the
        # binding name was mangled, so the result is currently discarded.
        snake_case : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def UpperCAmelCase ( self , A , A , A , A , A , A ) -> str:
        # Apparent intent: create (or validate) initial latents, then scale
        # them by the scheduler's init noise sigma.
        if latents is None:
            snake_case : List[str] = randn_tensor(A , generator=A , device=A , dtype=A )
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            snake_case : Union[str, Any] = latents.to(A )
        snake_case : Optional[Any] = latents * scheduler.init_noise_sigma
        return latents

    def UpperCAmelCase ( self , A=0 ) -> List[str]:
        # Apparent intent: sequential CPU offload of the heavy submodules via
        # accelerate's cpu_offload.
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        snake_case : Dict = torch.device(f"""cuda:{gpu_id}""" )
        snake_case : Tuple = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(A , A )

    def UpperCAmelCase ( self , A=0 ) -> Optional[Any]:
        # Apparent intent: model-level CPU offload with hooks (accelerate >= 0.17).
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        snake_case : int = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=A )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        snake_case : Tuple = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            snake_case , snake_case : Tuple = cpu_offload_with_hook(A , A , prev_module_hook=A )
        # We'll offload the last model manually.
        snake_case : Tuple = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def UpperCAmelCase ( self ) -> str:
        # Resolve the device hooks placed by accelerate; falls back to
        # ``self.device`` when no hook carries an execution device.
        if not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(A , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(A )
    def __call__( self , A , A , A , A = 5_1_2 , A = 5_1_2 , A = 1_0_0 , A = 4.0 , A = 1 , A = None , A = None , A = "pil" , A = True , ) -> Any:
        # Apparent intent: full denoising loop — prepare embeddings and hint,
        # run the UNet per timestep with classifier-free guidance, then decode
        # latents through MoVQ and post-process to the requested output type.
        snake_case : int = self._execution_device
        snake_case : Tuple = guidance_scale > 1.0
        if isinstance(A , A ):
            snake_case : str = torch.cat(A , dim=0 )
        if isinstance(A , A ):
            snake_case : int = torch.cat(A , dim=0 )
        if isinstance(A , A ):
            snake_case : Tuple = torch.cat(A , dim=0 )
        snake_case : List[Any] = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            # Duplicate embeddings/hint for the unconditional + conditional passes.
            snake_case : Optional[int] = image_embeds.repeat_interleave(A , dim=0 )
            snake_case : Dict = negative_image_embeds.repeat_interleave(A , dim=0 )
            snake_case : Optional[Any] = hint.repeat_interleave(A , dim=0 )
            snake_case : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A )
            snake_case : Optional[Any] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=A )
        self.scheduler.set_timesteps(A , device=A )
        snake_case : Tuple = self.scheduler.timesteps
        snake_case : Tuple = self.movq.config.latent_channels
        snake_case , snake_case : Optional[int] = downscale_height_and_width(A , A , self.movq_scale_factor )
        # create initial latent
        snake_case : str = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , A , A , A , self.scheduler , )
        for i, t in enumerate(self.progress_bar(A ) ):
            # expand the latents if we are doing classifier free guidance
            snake_case : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            snake_case : Any = {"""image_embeds""": image_embeds, """hint""": hint}
            snake_case : List[Any] = self.unet(
                sample=A , timestep=A , encoder_hidden_states=A , added_cond_kwargs=A , return_dict=A , )[0]
            if do_classifier_free_guidance:
                # Split into unconditional/conditional halves and blend by
                # guidance_scale; variance channels are re-attached afterwards.
                snake_case , snake_case : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
                snake_case , snake_case : Tuple = noise_pred.chunk(2 )
                snake_case , snake_case : Optional[Any] = variance_pred.chunk(2 )
                snake_case : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                snake_case : Dict = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                snake_case , snake_case : int = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            snake_case : Dict = self.scheduler.step(
                A , A , A , generator=A , )[0]
        # post-processing
        snake_case : Tuple = self.movq.decode(A , force_not_quantize=A )["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            # Map latent-decoded values from [-1, 1] to [0, 1] before conversion.
            snake_case : Optional[int] = image * 0.5 + 0.5
            snake_case : Union[str, Any] = image.clamp(0 , 1 )
            snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            snake_case : Tuple = self.numpy_to_pil(A )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=A )
| 176 | 1 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class UpperCamelCase__ ( lowercase__ ):
    """fsspec filesystem over a single Hugging Face Hub dataset repository.

    NOTE(review): identifiers here are machine-mangled.  ``__init__`` and the
    open method repeat the parameter name ``snake_case`` (a SyntaxError as
    written) and instance attributes are bound to throwaway names while later
    methods read ``self.repo_info`` / ``self.token`` / ``self.dir_cache``.
    Comments describe apparent intent only.
    """

    # Apparent intent: sep = "" and protocol = "hf-legacy".
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = ""
    SCREAMING_SNAKE_CASE__ : Optional[int] = "hf-legacy" # "hf://"" is reserved for hffs

    def __init__( self , snake_case = None , snake_case = None , **snake_case , ):
        '''Store the repo metadata and auth token; the path cache starts empty.'''
        super().__init__(self , **snake_case )
        UpperCAmelCase : str = repo_info
        UpperCAmelCase : Optional[int] = token
        UpperCAmelCase : Optional[int] = None

    def A_ ( self ):
        '''Lazily build a path -> info cache from the repo's sibling file list.'''
        if self.dir_cache is None:
            UpperCAmelCase : Union[str, Any] = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                UpperCAmelCase : Union[str, Any] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                # Register every ancestor directory of the file as well.
                self.dir_cache.update(
                    {
                        str(snake_case ): {"name": str(snake_case ), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )

    def A_ ( self , snake_case , snake_case = "rb" , **snake_case , ):
        '''Open a file in the repo by resolving its hf_hub_url and fetching via fsspec.'''
        if not isinstance(self.repo_info , snake_case ):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
        UpperCAmelCase : Tuple = hf_hub_url(self.repo_info.id , snake_case , revision=self.repo_info.sha )
        return fsspec.open(
            snake_case , mode=snake_case , headers=get_authentication_headers_for_url(snake_case , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()

    def A_ ( self , snake_case , **snake_case ):
        '''Return the cached info entry for a path, or raise FileNotFoundError.'''
        self._get_dirs()
        UpperCAmelCase : List[str] = self._strip_protocol(snake_case )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(snake_case )

    def A_ ( self , snake_case , snake_case=False , **snake_case ):
        '''List direct children of a path from the cache (names or full entries).'''
        self._get_dirs()
        UpperCAmelCase : Any = PurePosixPath(path.strip("/" ) )
        UpperCAmelCase : List[str] = {}
        for p, f in self.dir_cache.items():
            UpperCAmelCase : List[str] = PurePosixPath(p.strip("/" ) )
            UpperCAmelCase : Optional[Any] = p.parent
            if root == path:
                UpperCAmelCase : Optional[int] = f
        UpperCAmelCase : List[str] = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out )
| 311 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    # Pillow is optional: provide a no-op stand-in so the module still imports.
    # NOTE(review): the stub's name was mangled to UpperCamelCase__ — presumably
    # it originally shadowed ``Image``; verify against the upstream test file.
    class UpperCamelCase__ :
        """Placeholder used when Pillow is unavailable."""

        @staticmethod
        def A_ ( *snake_case , **snake_case ):
            '''No-op replacement for the real vision call.'''
            pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Pipeline tests for zero-shot object detection.

    NOTE(review): identifiers are machine-mangled — every test method is named
    ``A_`` (each definition shadows the previous one under unittest discovery)
    and some signatures repeat the parameter name ``snake_case`` (a SyntaxError
    as written).  The literal expectations below are preserved verbatim.
    """

    SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def A_ ( self , snake_case , snake_case , snake_case ):
        '''Build a tiny random pipeline plus one example input for the common harness.'''
        UpperCAmelCase : str = pipeline(
            "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
        UpperCAmelCase : Union[str, Any] = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def A_ ( self , snake_case , snake_case ):
        '''Check output structure: every detection has score, label and a box dict.'''
        UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 )
        UpperCAmelCase : Dict = len(snake_case )
        self.assertGreater(snake_case , 0 )
        self.assertEqual(
            snake_case , [
                {
                    "score": ANY(snake_case ),
                    "label": ANY(snake_case ),
                    "box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )},
                }
                for i in range(snake_case )
            ] , )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def A_ ( self ):
        '''TF backend is unsupported for this pipeline.'''
        pass

    @require_torch
    def A_ ( self ):
        '''Exact expectations against the tiny random checkpoint (single and batched input).'''
        UpperCAmelCase : Optional[Any] = pipeline(
            "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
        UpperCAmelCase : Optional[Any] = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(snake_case , decimals=4 ) , [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
            ] , )
        UpperCAmelCase : Tuple = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(snake_case , decimals=4 ) , [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
                ]
            ] , )

    @require_torch
    @slow
    def A_ ( self ):
        '''Expectations against the default (full-size) checkpoint, single and batched.'''
        UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" )
        UpperCAmelCase : Optional[int] = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
        self.assertEqual(
            nested_simplify(snake_case , decimals=4 ) , [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
            ] , )
        UpperCAmelCase : Union[str, Any] = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ] , )
        self.assertEqual(
            nested_simplify(snake_case , decimals=4 ) , [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
                ],
            ] , )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def A_ ( self ):
        '''TF backend is unsupported for this pipeline.'''
        pass

    @require_torch
    @slow
    def A_ ( self ):
        '''Threshold filtering: only detections scoring above 0.2 survive.'''
        UpperCAmelCase : Any = 0.2
        UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" )
        UpperCAmelCase : str = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , )
        self.assertEqual(
            nested_simplify(snake_case , decimals=4 ) , [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
            ] , )

    @require_torch
    @slow
    def A_ ( self ):
        '''top_k truncation: only the two highest-scoring detections are returned.'''
        UpperCAmelCase : Dict = 2
        UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" )
        UpperCAmelCase : List[str] = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , )
        self.assertEqual(
            nested_simplify(snake_case , decimals=4 ) , [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
            ] , )
| 311 | 1 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
A : str = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCamelCase (datasets.BuilderConfig ):
    """BuilderConfig for the Parquet loader.

    Fixes applied (review): the original rebound a single mangled class
    attribute three times, so only the last ``None`` survived.  The three
    fields are restored from the builder's own reads
    (``self.config.batch_size`` / ``.columns`` / ``.features``) and the
    otherwise-unused ``List``/``Optional`` typing imports at the top of this
    chunk.

    Attributes:
        batch_size: rows per Arrow record batch read from each file.
        columns: optional subset of columns to load.
        features: optional explicit feature schema.
    """

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


# The builder below refers to this config as ``ParquetConfig``; keep an alias
# so that name resolves even though the class identifier was mangled.
ParquetConfig = lowerCamelCase
class lowerCamelCase (datasets.ArrowBasedBuilder ):
    """Arrow-based dataset builder that streams Parquet files.

    Fixes applied (review):
    - ``ParquetConfig`` was an unresolved name (NameError at class creation);
      the config class defined just above is referenced by its current
      identifier instead.
    - all four methods were mangled to the same name ``__A``, so each
      definition shadowed the previous one; they are restored to the
      ``datasets.ArrowBasedBuilder`` hook names the framework invokes.
    """

    # At class-body execution time ``lowerCamelCase`` still names the
    # @dataclass config defined above (this class's own name binds only after
    # the body runs).
    BUILDER_CONFIG_CLASS = lowerCamelCase

    def _info(self):
        """Dataset metadata; features come from the builder config (may be None)."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Resolve ``data_files`` (str, list, tuple or dict) into split generators."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast a table to the declared feature schema."""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield ``(key, table)`` pairs, one per Parquet record batch."""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A : str = logging.get_logger(__name__)
A : Optional[int] = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
    """Configuration class for the Table Transformer (DETR-style) model.

    NOTE(review): this block is machine-mangled.  ``__init__`` repeats the
    parameter name ``__magic_name__`` dozens of times (a SyntaxError as
    written), and the body reads names (``backbone_config``,
    ``use_timm_backbone``, ...) that were the original parameter names.  The
    assignments below each store one of those values on ``self`` under a
    mangled attribute name; comments describe apparent intent only.
    """

    # Apparent intent: model_type, keys_to_ignore_at_inference, attribute_map.
    lowerCamelCase__ = '''table-transformer'''
    lowerCamelCase__ = ['''past_key_values''']
    lowerCamelCase__ = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__( self : List[Any] , __magic_name__ : Optional[Any]=True , __magic_name__ : Dict=None , __magic_name__ : Any=3 , __magic_name__ : List[str]=100 , __magic_name__ : Union[str, Any]=6 , __magic_name__ : Dict=2_048 , __magic_name__ : str=8 , __magic_name__ : int=6 , __magic_name__ : List[Any]=2_048 , __magic_name__ : Optional[int]=8 , __magic_name__ : Optional[int]=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Optional[Any]=True , __magic_name__ : List[Any]="relu" , __magic_name__ : List[str]=256 , __magic_name__ : List[str]=0.1 , __magic_name__ : int=0.0 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : Tuple=0.02 , __magic_name__ : str=1.0 , __magic_name__ : int=False , __magic_name__ : Dict="sine" , __magic_name__ : Union[str, Any]="resnet50" , __magic_name__ : Optional[Any]=True , __magic_name__ : str=False , __magic_name__ : List[str]=1 , __magic_name__ : int=5 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Tuple=1 , __magic_name__ : Optional[int]=1 , __magic_name__ : Optional[Any]=5 , __magic_name__ : Optional[int]=2 , __magic_name__ : Union[str, Any]=0.1 , **__magic_name__ : Tuple , ) -> str:
        # A timm backbone and an explicit backbone_config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(__magic_name__ , __magic_name__ ):
                # Rehydrate a dict-form backbone config via its registered class.
                SCREAMING_SNAKE_CASE_ = backbone_config.get("model_type" )
                SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type]
                SCREAMING_SNAKE_CASE_ = config_class.from_dict(__magic_name__ )
            # set timm attributes to None
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None, None, None
        # Apparent intent: store each constructor argument on self.
        SCREAMING_SNAKE_CASE_ = use_timm_backbone
        SCREAMING_SNAKE_CASE_ = backbone_config
        SCREAMING_SNAKE_CASE_ = num_channels
        SCREAMING_SNAKE_CASE_ = num_queries
        SCREAMING_SNAKE_CASE_ = d_model
        SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
        SCREAMING_SNAKE_CASE_ = encoder_layers
        SCREAMING_SNAKE_CASE_ = encoder_attention_heads
        SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
        SCREAMING_SNAKE_CASE_ = decoder_layers
        SCREAMING_SNAKE_CASE_ = decoder_attention_heads
        SCREAMING_SNAKE_CASE_ = dropout
        SCREAMING_SNAKE_CASE_ = attention_dropout
        SCREAMING_SNAKE_CASE_ = activation_dropout
        SCREAMING_SNAKE_CASE_ = activation_function
        SCREAMING_SNAKE_CASE_ = init_std
        SCREAMING_SNAKE_CASE_ = init_xavier_std
        SCREAMING_SNAKE_CASE_ = encoder_layerdrop
        SCREAMING_SNAKE_CASE_ = decoder_layerdrop
        SCREAMING_SNAKE_CASE_ = encoder_layers
        SCREAMING_SNAKE_CASE_ = auxiliary_loss
        SCREAMING_SNAKE_CASE_ = position_embedding_type
        SCREAMING_SNAKE_CASE_ = backbone
        SCREAMING_SNAKE_CASE_ = use_pretrained_backbone
        SCREAMING_SNAKE_CASE_ = dilation
        # Hungarian matcher
        SCREAMING_SNAKE_CASE_ = class_cost
        SCREAMING_SNAKE_CASE_ = bbox_cost
        SCREAMING_SNAKE_CASE_ = giou_cost
        # Loss coefficients
        SCREAMING_SNAKE_CASE_ = mask_loss_coefficient
        SCREAMING_SNAKE_CASE_ = dice_loss_coefficient
        SCREAMING_SNAKE_CASE_ = bbox_loss_coefficient
        SCREAMING_SNAKE_CASE_ = giou_loss_coefficient
        SCREAMING_SNAKE_CASE_ = eos_coefficient
        super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )

    @property
    def __A ( self : Union[str, Any] ) -> int:
        # Apparent intent: ``num_attention_heads`` alias.
        return self.encoder_attention_heads

    @property
    def __A ( self : Any ) -> int:
        # Apparent intent: ``hidden_size`` alias.  NOTE(review): both
        # properties are named ``__A``, so this one shadows the previous.
        return self.d_model
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
    """ONNX export configuration: input axis spec, validation tolerance and
    minimum torch-onnx version.

    NOTE(review): the three properties are all named ``__A``, so each shadows
    the previous one — preserved as-is from the mangled original.
    """

    # Minimum torch version required for the ONNX export.
    lowerCamelCase__ = version.parse('''1.11''' )

    @property
    def __A ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the two model inputs.
        image_axes = {0: "batch", 1: "num_channels", 2: "height", 3: "width"}
        mask_axes = {0: "batch"}
        return OrderedDict(
            [
                ("pixel_values", image_axes),
                ("pixel_mask", mask_axes),
            ] )

    @property
    def __A ( self : Any ) -> float:
        # Absolute tolerance used when validating exported outputs.
        return 1e-5

    @property
    def __A ( self : int ) -> int:
        # Default ONNX opset.
        return 12
| 305 | 1 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class a_ ( unittest.TestCase ):
    """Smoke tests for the ``accelerate launch`` CLI.

    NOTE(review): identifiers are machine-mangled — the class attributes are
    all named ``__UpperCAmelCase`` (each rebinding shadows the previous) while
    the code reads ``mod_file`` / ``self.config_path`` / ``self.base_cmd`` etc.,
    and the three test methods share the name ``_lowerCAmelCase``.  Comments
    describe apparent intent only.
    """

    # Apparent intent: locate the bundled test script and the user's default
    # accelerate config, plus the base launch command.
    __UpperCAmelCase = inspect.getfile(accelerate.test_utils )
    __UpperCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
    __UpperCAmelCase = ['accelerate', 'launch']
    __UpperCAmelCase = Path.home() / '.cache/huggingface/accelerate'
    __UpperCAmelCase = 'default_config.yaml'
    __UpperCAmelCase = config_folder / config_file
    __UpperCAmelCase = config_folder / '_default_config.yaml'
    __UpperCAmelCase = Path('tests/test_configs' )

    @classmethod
    def _lowerCAmelCase ( cls : Union[str, Any] ):
        # Stash the user's real config aside so tests run against defaults.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path )

    @classmethod
    def _lowerCAmelCase ( cls : Dict ):
        # Restore the user's config after the test run.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path )

    def _lowerCAmelCase ( self : Tuple ):
        # Launch the test script, adding --multi_gpu when >1 CUDA device exists.
        SCREAMING_SNAKE_CASE =self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] ,env=os.environ.copy() )

    def _lowerCAmelCase ( self : Optional[Any] ):
        # Launch once per YAML config fixture found under tests/test_configs.
        for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
            with self.subTest(config_file=snake_case ):
                execute_subprocess_async(
                    self.base_cmd + ['--config_file', str(snake_case ), self.test_file_path] ,env=os.environ.copy() )

    def _lowerCAmelCase ( self : Dict ):
        # ``accelerate test`` should complete successfully end to end.
        execute_subprocess_async(['accelerate', 'test'] ,env=os.environ.copy() )
class a_(unittest.TestCase):
    """Tests for the ``accelerate tpu-config`` CLI.

    Each test runs the command in ``--debug`` mode (which prints the gcloud
    command instead of executing it) and asserts on the printed command line.
    The garbled code assigned the output to one name and asserted on another
    (``snake_case``, undefined) and passed ``return_stdout=snake_case``; this
    restores ``output`` and ``return_stdout=True``.
    """

    tpu_name = 'test-tpu'
    tpu_zone = 'us-central1-a'
    command = 'ls'
    cmd = ['accelerate', 'tpu-config']
    base_output = 'cd /usr/share'
    command_file = 'tests/test_samples/test_command_file.sh'
    gcloud = 'Running gcloud compute tpus tpu-vm ssh'

    def test_base(self):
        output = run_command(
            self.cmd
            + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all',
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command',
                self.command,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all',
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all',
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--command',
                self.command,
                '--command',
                'echo "Hello World"',
                '--debug',
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command_file',
                self.command_file,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--install_accelerate',
                '--accelerate_version',
                '12.0.0',
                '--debug',
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
| 334 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps submodule name -> list of public names it exports.
# The garbled version bound this to a throwaway name, then passed the
# undefined `_import_structure` to `_LazyModule` (NameError) and never
# installed the lazy module into `sys.modules`.
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch-backed classes are simply omitted when torch is absent.
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    # At runtime, replace this module with a proxy that imports submodules
    # lazily on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 211 | 0 |
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers 1, 2, 3, 5, 8, ... indefinitely.

    Restored from a garbled definition whose name collided with the solver
    below, leaving the call site (`fibonacci_generator()`) unresolved.
    """
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def SCREAMING_SNAKE_CASE(snake_case_: int = 1000) -> int:
    """Project Euler 25: index (1-based) of the first Fibonacci number
    containing ``snake_case_`` digits.

    The generator starts at F(2) = 1, so the final ``+ 1`` converts the
    count of too-short terms into the 1-based Fibonacci index.
    """
    answer = 1
    fib = fibonacci_generator()
    while len(str(next(fib))) < snake_case_:
        answer += 1
    return answer + 1


# The __main__ block below calls the solver under its conventional name.
solution = SCREAMING_SNAKE_CASE

if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 286 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

# File names the tokenizer serializes to / loads from.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class SCREAMING_SNAKE_CASE__(PreTrainedTokenizerFast):
    """Fast (Rust-backed) LED tokenizer, based on byte-level BPE.

    NOTE(review): the garbled source had every method named ``_lowercase``
    (so each definition silently overrode the previous one), duplicate
    ``__A`` parameters in ``__init__`` (a SyntaxError), and a
    ``@mask_token.setter`` referencing a name that did not exist. Names are
    restored to the conventional ``PreTrainedTokenizerFast`` overrides.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the pre-tokenizer if the serialized `add_prefix_space`
        # disagrees with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        """The mask token, or ``None`` (with a logged error) when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Make the mask token lstrip-able so "<mask>" consumes the space
        # before it, matching how the model was pretrained.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Serialize the underlying Rust tokenizer model to ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``<s> A </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """LED (like RoBERTa/BART) does not use token type ids: all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad like the base class, then pad ``global_attention_mask`` to match."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 286 | 1 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    """Build a tiny model/optimizer/scheduler/dataloader bundle for the tests.

    Restored from three colliding ``__snake_case`` definitions; the test
    class below calls these helpers by their real names.
    """
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    """Cheap scalar fingerprint of a model's parameters (sum of |weights| + |bias|)."""
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    """Overwrite ``model``'s parameters with freshly initialised ones of the same shape."""
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class __magic_name__(AccelerateTestCase):
    """Core unit tests for ``accelerate.Accelerator``.

    NOTE(review): reconstructed from garbled source — all methods shared one
    name, inner helpers had duplicate ``*snake_case``/``**snake_case``
    parameters (SyntaxError), ``assertRaises(snake_case)`` referenced an
    undefined name, and bnb kwargs were mangled (``load_in_abit``).
    """

    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        # Requesting CPU after a CUDA state exists must fail loudly.
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")

    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerate_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )

    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """Tests that the accelerator can be used with an 8-bit quantized model."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Preparing an 8-bit model with CPU-offloaded modules must raise."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            device_map=device_map,
            load_in_8bit=True,
            llm_int8_enable_fp32_cpu_offload=True,
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """Preparing a sharded 8-bit model under distributed training must raise."""
        from transformers import AutoModelForCausalLM

        # presumably seeds the distributed state — TODO confirm against upstream
        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)
        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        """A sharded 8-bit model prepares fine when no distributed state is set."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(optimizer)
| 300 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of ``func`` (an expression in ``x``) via Newton-Raphson,
    starting from the initial guess ``a``.

    ``func`` is evaluated with ``eval`` against the local ``x`` — the string
    must be trusted input (``eval`` on untrusted data is a code-execution
    risk, hence the ``noqa: S307`` markers). The garbled original had three
    parameters sharing one name (a SyntaxError) and a loop variable the
    evaluated expression could not see.
    """
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Backward-compatible alias for the original (garbled) public name.
__snake_case = newton_raphson

# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function (value of pi)
    print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
    # Find root of polynomial
    print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find root of the logarithm (value of e)
    print(F'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
    # Exponential Roots
    print(F'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 300 | 1 |
import base64  # the garbled source imported the nonexistent module `baseaa`


def baseaa_encode(string: str) -> bytes:
    """Encode ``string`` (as UTF-8) with Base85 and return the raw bytes."""
    return base64.b85encode(string.encode("utf-8"))


def baseaa_decode(encoded: bytes) -> str:
    """Decode Base85 ``encoded`` bytes back into a UTF-8 string."""
    return base64.b85decode(encoded).decode("utf-8")


# Backward-compatible alias: the module previously exposed the last of two
# colliding definitions under this name.
UpperCAmelCase = baseaa_decode

if __name__ == "__main__":
    test = 'Hello World!'
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
| 124 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Load the iris dataset and split it. The garbled source bound every value
# (including the 4-way train/test split) to the same colliding name, leaving
# X_train / y_train / classes — used by the __main__ block below — undefined.
data = datasets.load_iris()
X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Euclidean (L2) distance between two equal-length coordinate sequences."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify ``point`` by a k-nearest-neighbours majority vote.

    The garbled original gave both functions one colliding name and
    duplicate parameter names (a SyntaxError) while the bodies referenced
    the real identifiers restored here.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


# Backward-compatible alias for the original (garbled) public name.
UpperCAmelCase = classifier

if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 124 | 1 |
'''simple docstring'''
from __future__ import annotations
# Movement deltas, indexed by the `action` grid produced by `search`.
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """A* search over ``grid`` (0 = free, 1 = obstacle).

    Returns ``(path, action)`` where ``path`` is the list of cells from
    ``init`` to ``goal`` and ``action[x][y]`` is the DIRECTIONS index used
    to enter cell (x, y). Raises ValueError when no path exists. The
    garbled original declared five parameters with one shared name (a
    SyntaxError) while the body used the real names restored here.
    """
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    # Walk backwards from the goal using the recorded actions.
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


# Backward-compatible alias for the original (garbled) public name.
_lowerCAmelCase = search

if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 47 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Trial-division primality test using the 6k±1 optimisation."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


# All odd composite numbers below 100001; scanned by compute_nums below.
odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first ``n`` odd composites that cannot be written as a
    prime plus twice a square (Project Euler 46)."""
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    if n <= 0:
        raise ValueError('n must be >= 0')

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        # Try every representation odd_composite = prime + 2*i*i.
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            # No representation found: this composite violates the conjecture.
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """Smallest odd composite that is not a prime plus twice a square."""
    return compute_nums(1)[0]


# Backward-compatible alias: the module previously exposed the solver under
# this (garbled) name.
SCREAMING_SNAKE_CASE = solution

if __name__ == "__main__":
    print(F"""{solution() = }""")
| 50 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Holds hyper-parameters for the Beit image-processing tests and builds
    the kwargs dict used to instantiate the image processor.

    The garbled ``__init__`` declared thirteen parameters with one shared
    name (a SyntaxError) while the body assigned the real attribute names
    restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_reduce_labels=False,
    ):
        # ``None`` sentinels keep the historical defaults while avoiding
        # mutable default arguments.
        size = size if size is not None else {'''height''': 20, '''width''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def _UpperCamelCase(self):
        """Return the kwargs dict for constructing the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def __lowerCamelCase():
    """Load one (image, segmentation map) pair from the ADE20k test fixtures.

    Fixes vs. original: both PIL images were bound to a single obfuscated
    local while the return statement read `image`/`map`, and the return
    annotation referenced the never-imported `Optional`.
    """
    dataset = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
    image = Image.open(dataset[0]['file'])
    # `map` shadows the builtin; kept to match the original return name
    map = Image.open(dataset[1]['file'])
    return image, map
def __lowerCamelCase():
    """Load two (image, segmentation map) pairs from the ADE20k test
    fixtures, as parallel batched lists.

    Fixes vs. original: all four PIL images were bound to a single
    obfuscated local while the return statement read undefined names, and
    the return annotation referenced an unimported type.
    """
    ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
    image_a = Image.open(ds[0]['file'])
    map_a = Image.open(ds[1]['file'])
    image_b = Image.open(ds[2]['file'])
    map_b = Image.open(ds[3]['file'])
    return [image_a, image_b], [map_a, map_b]
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE( A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Tuple = BeitImageProcessor if is_vision_available() else None
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = BeitImageProcessingTester(self )
@property
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'''do_resize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'''size''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'''do_center_crop''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'''center_crop''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'''image_mean''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'''image_std''' ) )
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size ,{'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = self.image_processing_class.from_dict(
self.image_processor_dict ,size=42 ,crop_size=84 ,reduce_labels=SCREAMING_SNAKE_CASE__ )
self.assertEqual(image_processor.size ,{'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels ,SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
pass
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE :List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE :Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE :Tuple = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE :int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE :Optional[int] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE :List[str] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE :Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE :Optional[int] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE :Dict = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE :int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = []
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__SCREAMING_SNAKE_CASE :List[str] = image_processing(image_inputs[0] ,maps[0] ,return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
self.assertEqual(
encoding['''labels'''].shape ,(
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
self.assertEqual(encoding['''labels'''].dtype ,torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched
__SCREAMING_SNAKE_CASE :int = image_processing(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
self.assertEqual(
encoding['''labels'''].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
self.assertEqual(encoding['''labels'''].dtype ,torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test not batched input (PIL images)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Any = prepare_semantic_single_inputs()
__SCREAMING_SNAKE_CASE :Optional[Any] = image_processing(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
self.assertEqual(
encoding['''labels'''].shape ,(
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
self.assertEqual(encoding['''labels'''].dtype ,torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched input (PIL images)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[int] = prepare_semantic_batch_inputs()
__SCREAMING_SNAKE_CASE :int = image_processing(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape ,(
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
self.assertEqual(
encoding['''labels'''].shape ,(
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
self.assertEqual(encoding['''labels'''].dtype ,torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Any = prepare_semantic_single_inputs()
__SCREAMING_SNAKE_CASE :Dict = image_processing(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 1_50 )
__SCREAMING_SNAKE_CASE :List[str] = True
__SCREAMING_SNAKE_CASE :Tuple = image_processing(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) | 239 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def __lowerCamelCase(x: float, y: float, max_step: int) -> float:
    """Return the normalized escape time of the point (x, y) under the
    Mandelbrot iteration z -> z**2 + c, in [0, 1] (1.0 = never diverged).

    Fixes vs. original: the signature declared the same name for all three
    parameters (a SyntaxError) and the body's obfuscated locals never bound
    `a`, `b` or `a_new`; names restored from the body's references.

    NOTE(review): as in the original, max_step <= 1 divides by zero (or
    leaves `step` unbound) — callers are expected to pass max_step >= 2.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def __lowerCamelCase(distance: float) -> tuple:
    """Black-and-white coloring: black for points inside the Mandelbrot set
    (distance == 1), white otherwise.

    Fix vs. original: the parameter was named `a_` while the body read the
    undefined name `distance`; the parameter name is restored.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_55, 2_55, 2_55)
def __lowerCamelCase(distance: float) -> tuple:
    """Distance-based coloring: black inside the set (distance == 1),
    otherwise an HSV hue derived from the escape distance.

    Fix vs. original: the parameter was named `a_` while the condition read
    the undefined name `distance`; the parameter name is restored and the
    hue is computed from the same value.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def __lowerCamelCase(
    image_width: int = 8_00,
    image_height: int = 6_00,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set into a new RGB PIL image.

    Fixes vs. original: the signature reused one name for all seven
    parameters (a SyntaxError) and the obfuscated locals never bound `img`
    or the per-pixel figure coordinates; names restored from the body.

    NOTE(review): `get_distance`, `get_color_coded_rgb` and
    `get_black_and_white_rgb` are bound to obfuscated names in this file —
    confirm the intended bindings before running.
    """
    img = Image.new('''RGB''' , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    # NOTE(review): `get_image` is bound to an obfuscated name in this file —
    # confirm the intended binding before running.
    lowerCamelCase_ = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    # Fixes vs. original: show the image actually created above (the
    # original called `.show()` on the undefined name `img`) and drop the
    # stray table-separator text fused onto the last line.
    lowerCamelCase_.show()
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
    """Test suite for CLIPProcessor: save/load round-trips, parity with the
    underlying tokenizer and image processor, and batch_decode delegation.

    NOTE(review): this block is machine-obfuscated — inside the class body
    the locals `__lowercase` name-mangle to `_UpperCAmelCase___lowercase`,
    so attributes such as `self.tmpdirname`, `self.vocab_file` and the names
    the assertions read (`tokenizer_slow`, `processor`, `inputs`, ...) are
    never actually bound, and `lowercase_` is undefined here. Restore
    against the un-obfuscated upstream before running; only comments and
    docstrings have been added below.
    """
    def _lowerCamelCase ( self ) -> Union[str, Any]:
        """Write a tiny BPE vocab/merges and a CLIP image-processor config to a temp dir."""
        __lowercase : Dict = tempfile.mkdtemp()
        # fmt: off
        __lowercase : Optional[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        __lowercase : Optional[int] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
        __lowercase : Tuple = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        __lowercase : Optional[Any] = {'unk_token': '<unk>'}
        __lowercase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(lowercase_ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(lowercase_ ) )
        __lowercase : List[str] = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
        }
        __lowercase : List[Any] = os.path.join(self.tmpdirname , lowercase_ )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(lowercase_ , lowercase_ )
    def _lowerCamelCase ( self , **UpperCamelCase_ ) -> List[str]:
        """Load the slow CLIP tokenizer from the fixture dir."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
    def _lowerCamelCase ( self , **UpperCamelCase_ ) -> Tuple:
        """Load the fast (Rust) CLIP tokenizer from the fixture dir."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
    def _lowerCamelCase ( self , **UpperCamelCase_ ) -> str:
        """Load the CLIP image processor from the fixture dir."""
        return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ )
    def _lowerCamelCase ( self ) -> str:
        """Remove the temp fixture dir."""
        shutil.rmtree(self.tmpdirname )
    def _lowerCamelCase ( self ) -> Any:
        """Build one random 30x400 RGB PIL image as processor input."""
        __lowercase : Dict = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        # NOTE(review): `np.uinta` looks like a mangled `np.uint8` — confirm.
        __lowercase : Optional[int] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def _lowerCamelCase ( self ) -> Tuple:
        """Save/load round-trip preserves tokenizer vocab and image-processor config (slow and fast)."""
        __lowercase : Optional[Any] = self.get_tokenizer()
        __lowercase : str = self.get_rust_tokenizer()
        __lowercase : Dict = self.get_image_processor()
        __lowercase : List[Any] = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        processor_slow.save_pretrained(self.tmpdirname )
        __lowercase : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
        __lowercase : int = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        processor_fast.save_pretrained(self.tmpdirname )
        __lowercase : Union[str, Any] = CLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , lowercase_ )
        self.assertIsInstance(processor_fast.tokenizer , lowercase_ )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , lowercase_ )
        self.assertIsInstance(processor_fast.image_processor , lowercase_ )
    def _lowerCamelCase ( self ) -> List[Any]:
        """from_pretrained honours extra tokenizer/image-processor kwargs."""
        __lowercase : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __lowercase : int = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        __lowercase : List[str] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
        __lowercase : int = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowercase_ , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , lowercase_ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowercase_ )
    def _lowerCamelCase ( self ) -> List[Any]:
        """Processor image path matches the bare image processor numerically."""
        __lowercase : Union[str, Any] = self.get_image_processor()
        __lowercase : int = self.get_tokenizer()
        __lowercase : List[Any] = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        __lowercase : int = self.prepare_image_inputs()
        __lowercase : str = image_processor(lowercase_ , return_tensors='''np''' )
        __lowercase : Dict = processor(images=lowercase_ , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def _lowerCamelCase ( self ) -> Optional[Any]:
        """Processor text path matches the bare tokenizer exactly."""
        __lowercase : List[Any] = self.get_image_processor()
        __lowercase : Dict = self.get_tokenizer()
        __lowercase : Any = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        __lowercase : Any = 'lower newer'
        __lowercase : Dict = processor(text=lowercase_ )
        __lowercase : Union[str, Any] = tokenizer(lowercase_ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _lowerCamelCase ( self ) -> Dict:
        """Text+image call yields the expected keys; empty call raises."""
        __lowercase : Tuple = self.get_image_processor()
        __lowercase : int = self.get_tokenizer()
        __lowercase : Dict = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        __lowercase : List[Any] = 'lower newer'
        __lowercase : List[Any] = self.prepare_image_inputs()
        __lowercase : List[Any] = processor(text=lowercase_ , images=lowercase_ )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(lowercase_ ):
            processor()
    def _lowerCamelCase ( self ) -> int:
        """batch_decode delegates to the tokenizer."""
        __lowercase : Union[str, Any] = self.get_image_processor()
        __lowercase : List[Any] = self.get_tokenizer()
        __lowercase : Optional[int] = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        __lowercase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __lowercase : Optional[int] = processor.batch_decode(lowercase_ )
        __lowercase : List[str] = tokenizer.batch_decode(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
    def _lowerCamelCase ( self ) -> Any:
        """Combined call produces exactly the processor's model_input_names."""
        __lowercase : Union[str, Any] = self.get_image_processor()
        __lowercase : Tuple = self.get_tokenizer()
        __lowercase : Optional[Any] = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        __lowercase : Any = 'lower newer'
        __lowercase : str = self.prepare_image_inputs()
        __lowercase : Any = processor(text=lowercase_ , images=lowercase_ )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 249 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
#
# Fix vs. original: the list was bound to an obfuscated name while the code
# below reads `pkgs_to_check_at_runtime`; bind the name actually used.
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()

# backports only needed on old interpreters
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append('importlib_metadata')

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def a_ ( pkg , hint=None ):
    """Check that the installed version of *pkg* satisfies the pin recorded
    in `deps`; *hint* is forwarded to `require_version` for the error text.

    Fix vs. original: both parameters were declared with the same obfuscated
    name (duplicate argument names are a SyntaxError) and the body read the
    undefined name `pkg`; parameter names restored from the body.
    """
    require_version(deps[pkg] , hint )
| 199 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Dict = logging.get_logger(__name__)  # module-level logger (transformers logging wrapper)
# Canonical checkpoint -> hosted config-file URL map for Donut-Swin models.
# NOTE(review): this rebinds the same obfuscated name as the logger above —
# upstream these are two distinct names; confirm against the original file.
lowerCamelCase : Any = {
    '''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class lowerCAmelCase ( PretrainedConfig ):
    """Configuration class for the Donut-Swin encoder.

    Fixes vs. original: the base class is restored to the imported
    `PretrainedConfig` (the original referenced an undefined obfuscated
    name), the two class attributes no longer share one name, the `__init__`
    parameter names are restored from the attribute assignments (the
    original reused one name for every parameter — a SyntaxError), and the
    stray table-separator text fused onto the last line is removed.
    """

    model_type = '''donut-swin'''
    # map config-attribute aliases onto the stored names
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
    """Integration test for the TF CamemBERT base checkpoint.

    Fixes vs. original: the method's single obfuscated local never bound the
    names the later lines read (`model`, `input_ids`, `output`, ...), the
    mangled dtypes `tf.intaa`/`tf.floataa` are restored to
    `tf.int32`/`tf.float32`, and the stray table-separator text fused onto
    the last line is removed.
    """

    @slow
    def lowerCAmelCase ( self ) -> None:
        """Forward a short sentence and check shape plus a value slice."""
        model = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
        # token ids for: J'aime le camembert !
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.int32 , )
        output = model(input_ids )["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# Sample inputs for the demo in the __main__ block below.
# Fix vs. original: both tuples were bound to the same obfuscated name `a`
# (the second shadowed the first); restore the names the demo reads.
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """Singly linked list node: one integer plus a link to the next node.

    Fix vs. original: both fields were declared under the same obfuscated
    name (the second annotation replaced the first, leaving a one-field
    dataclass), and the class itself was bound to a name that shadowed the
    list class below; the upstream names `Node`, `data` and `next_node` —
    which the list class's body actually references — are restored.
    """

    data: int
    next_node: Node | None
class lowercase_:
    """Singly linked list that stores its integers in ascending order.

    Fixes vs. original: values are stored on `self.head` (the original bound
    an obfuscated local instead, so the list stayed empty), `sorted(...)`
    receives `reverse=True` rather than the input iterable, iteration
    advances via the node's `next_node`, and `__str__` stringifies each
    yielded value.
    """

    def __init__(self, _UpperCAmelCase: Iterable[int]) -> None:
        # insert largest first so the final list reads smallest -> largest
        self.head: Node | None = None
        for value in sorted(_UpperCAmelCase, reverse=True):
            self.head = Node(value, self.head)

    def __iter__(self) -> Iterator[int]:
        """Yield the stored integers in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        """Number of stored integers."""
        return sum(1 for _ in self)

    def __str__(self) -> str:
        """Render as 'a -> b -> c'."""
        return " -> ".join(str(item) for item in self)
def _snake_case(sll_one, sll_two):
    """Merge two sorted linked lists into one new sorted list.

    Fixes vs. original: the function reused one obfuscated name for itself
    and both parameters (duplicate argument names are a SyntaxError) and
    referenced the undefined `SortedLinkedList`; the sorted-list class is
    defined in this file as `lowercase_`, which re-sorts the concatenation.
    """
    return lowercase_(list(sll_one) + list(sll_two))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `SortedLinkedList`, `SSL` and `merge_lists` are not
    # defined under those names in this file (obfuscated bindings) — this
    # demo raises NameError as written; the test-data tuples above may also
    # be bound under different names. Confirm against the upstream file.
    a = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 315 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowercase_(nn.Module):
    """Flax 2D cross-attention down block: `num_layers` x (ResNet ->
    Transformer), optionally followed by a downsampler; returns the final
    hidden states plus the tuple of per-stage residual states.

    Fixes vs. original: the dataclass fields all shared one obfuscated name
    (leaving a single field), `setup` was renamed so flax never built the
    submodules, the built lists were never assigned to `self`, `__call__`
    declared duplicate parameter names (a SyntaxError), and the mangled
    `jnp.floataa` default is restored to `jnp.float32`.
    NOTE(review): upstream this class is diffusers'
    `FlaxCrossAttnDownBlock2D` — confirm field names against it.
    """

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            # first layer maps in_channels -> out_channels, the rest keep width
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class lowercase_(nn.Module):
    """Flax 2D down block: `num_layers` ResNet blocks, optionally followed
    by a downsampler; returns the final hidden states plus the tuple of
    per-stage residual states.

    Fixes vs. original: the dataclass fields all shared one obfuscated name,
    `setup` was renamed so flax never built the submodules, the resnet list
    was never assigned to `self`, `__call__` declared duplicate parameter
    names, and the mangled `jnp.floataa` default is restored to
    `jnp.float32`. NOTE(review): upstream this is diffusers'
    `FlaxDownBlock2D` — confirm field names against it.
    """

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            # first layer maps in_channels -> out_channels, the rest keep width
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class lowercase_(nn.Module):
    """Flax 2D cross-attention up block: `num_layers` x (skip-concat ->
    ResNet -> Transformer), optionally followed by an upsampler.

    Fixes vs. original: the dataclass fields all shared one obfuscated name,
    `setup` was renamed so flax never built the submodules, the built lists
    were never assigned to `self`, and `__call__` declared duplicate
    parameter names (a SyntaxError); the mangled `jnp.floataa` default is
    restored to `jnp.float32`. NOTE(review): upstream this is diffusers'
    `FlaxCrossAttnUpBlock2D` — confirm field names against it.
    """

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            # width of the skip connection consumed at this layer
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)
        return hidden_states
class lowercase_ ( nn.Module ):
    """Flax up-block without attention: `num_layers` resnets, each consuming one
    skip connection, followed by an optional upsampler.

    BUG FIX: erased field names, duplicate parameter names (SyntaxError) and
    `jnp.floataa` restored; names taken from the bodies' own references.
    """

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32  # was `jnp.floataa` (AttributeError)

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            # channel count of the skip connection from the mirrored down block
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=self.out_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)

        return hidden_states
class lowercase_ ( nn.Module ):
    """Flax UNet mid-block: one leading resnet, then `num_layers` pairs of
    (cross-attention transformer, resnet).

    BUG FIX: erased field names, duplicate parameter names (SyntaxError) and
    `jnp.floataa` restored; names taken from the bodies' own references.
    """

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32  # was `jnp.floataa` (AttributeError)

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []
        for _ in range(self.num_layers):
            attentions.append(
                FlaxTransformeraDModel(
                    in_channels=self.in_channels,
                    n_heads=self.num_attention_heads,
                    d_head=self.in_channels // self.num_attention_heads,
                    depth=1,
                    use_linear_projection=self.use_linear_projection,
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            )
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=self.in_channels,
                    out_channels=self.in_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
| 315 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    # Output container returned by the autoencoder's `encode()` below: it wraps
    # the posterior distribution over latents.
    # NOTE(review): the encode/tiled_encode bodies construct this type via the
    # name `AutoencoderKLOutput`, so the obfuscated class name here likely needs
    # restoring — confirm against the original module.
    # Latent distribution produced by the encoder (forward-referenced type,
    # imported above from .vae as DiagonalGaussianDistribution).
    _lowerCamelCase : "DiagonalGaussianDistribution"
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
    """KL-regularized variational autoencoder with optional sliced and tiled
    encode/decode, following diffusers' `AutoencoderKL`.

    BUG FIXES applied to the obfuscated original:
    - every method was named `lowercase_` (each shadowing the previous) while
      the bodies call the real names (`self.encode`, `self.tiled_decode`,
      `self.blend_v`, ...); the real names are restored,
    - every signature repeated the parameter name `a_` (a SyntaxError); the
      parameter names the bodies actually reference are restored,
    - erased attribute assignment targets (`self.encoder`, `self.quant_conv`,
      ...) are restored from the attributes the methods read,
    - `nn.Convad` does not exist in torch; restored to `nn.Conv2d`,
    - the slicing paths encoded/decoded the *full* batch once per slice instead
      of the individual slice.
    """

    # presumably `_supports_gradient_checkpointing` in the original — name kept
    # as-is because nothing in view demonstrates it; TODO confirm.
    _lowerCamelCase : Any = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18_215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        # encoder emits mean+logvar, hence 2 * latent_channels
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        # presumably toggles `module.gradient_checkpointing` — the assignment
        # target was erased in the obfuscated source; restored per convention.
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        """Enable tiled encode/decode so very large images fit in memory."""
        self.use_tiling = use_tiling

    def disable_tiling(self):
        """Disable tiling; tensors are processed in a single pass."""
        self.enable_tiling(False)

    def enable_slicing(self):
        """Decode/encode the batch one sample at a time to save memory."""
        self.use_slicing = True

    def disable_slicing(self):
        """Process the whole batch in one pass again."""
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Return every attention processor in the model, keyed by module path."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f'{name}.processor'] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f'{name}.{sub_name}', child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Set the attention processor for all layers: one shared instance, or a
        dict keyed by the paths returned from `attn_processors`."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f'A dict of processors was passed, but the number of processors {len(processor )} does not match the'
                f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f'{name}.processor'))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Reset every attention layer to the default `AttnProcessor`."""
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        """Encode images into a posterior over latents; tiles/slices if enabled."""
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            # BUG FIX: previously encoded the full batch once per slice; each
            # 1-sample slice must be encoded individually.
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        # NOTE(review): `AutoencoderKLOutput` is the name the original code
        # uses; the output dataclass above was renamed by obfuscation — confirm
        # it resolves in the restored module.
        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Single-pass decode of latents (tiled if the latent exceeds the tile size)."""
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Decode latents to images; optionally one sample at a time."""
        if self.use_slicing and z.shape[0] > 1:
            # BUG FIX: previously decoded the full batch once per slice.
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        # linearly blend the bottom rows of tile `a` into the top rows of `b`
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        # linearly blend the right columns of tile `a` into the left columns of `b`
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True):
        """Encode with overlapping tiles, blending the seams between tiles."""
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)

        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Decode with overlapping latent tiles, blending the seams."""
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)

        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Full autoencode pass: encode, sample (or take the mode) of the
        posterior, then decode."""
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ):
    """Load a PyTorch checkpoint and convert it to a Flax state dict.

    BUG FIX: the obfuscated signature repeated `_UpperCamelCase` for all four
    parameters (a SyntaxError) and erased the assignment targets; names are
    restored from the body's own references (`is_sharded`, `pt_path`, ...).

    Args:
        flax_model: target Flax model (provides expected parameter structure).
        pytorch_checkpoint_path: path to a `.bin` checkpoint, or the list of
            shard files when `is_sharded` is True.
        is_sharded: whether the checkpoint is split across shard files.
        allow_missing_keys: accepted for API compatibility; unused here.

    Returns:
        The converted Flax state dict.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(F'Loading PyTorch weights from {pt_path}' )
        pt_state_dict = torch.load(pt_path , map_location="cpu" )
        logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
    return flax_state_dict
def SCREAMING_SNAKE_CASE__( pt_tuple_key: Tuple[str] , pt_tensor: np.ndarray , random_flax_state_dict: Dict[str, jnp.ndarray] , model_prefix: str , ) -> (Tuple[str], np.ndarray):
    """Rename a single PyTorch parameter key to its Flax equivalent, transposing
    the tensor where the layouts differ (conv kernels, linear weights).

    BUG FIX: the obfuscated signature repeated `_UpperCamelCase` for all four
    parameters (a SyntaxError) and erased the `renamed_pt_tuple_key` targets;
    names restored from the annotations and the body's references.

    Args:
        pt_tuple_key: dotted PyTorch key split into a tuple of components.
        pt_tensor: the parameter's value as a numpy array.
        random_flax_state_dict: flattened Flax params used to disambiguate
            renames (e.g. layer norm `weight` -> `scale` only if present).
        model_prefix: the model's base-model prefix.

    Returns:
        `(flax_key_tuple, tensor)` — possibly renamed and transposed.
    """

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks if `key` of `(prefix,) + key` is in random_flax_state_dict"""
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch OIHW -> Flax HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose the weight matrix
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def SCREAMING_SNAKE_CASE__( pt_state_dict , flax_model ):
    """Convert a (non-sharded) PyTorch state dict to a nested Flax params dict.

    BUG FIX: the obfuscated signature repeated `_UpperCamelCase` for both
    parameters (a SyntaxError) and erased the assignment targets; names are
    restored from the body's own references (`pt_state_dict`, `model_prefix`,
    `random_flax_state_dict`, ...).
    """
    # convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params )

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"] )
        random_flax_state_dict.update(flax_batch_stats )

    flax_state_dict = {}

    # head checkpoint -> base model, or base checkpoint -> model with head
    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("." ) )

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key , None )
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor )
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor )

    return unflatten_dict(flax_state_dict )
def SCREAMING_SNAKE_CASE__( shard_filenames , flax_model ):
    """Convert a sharded PyTorch checkpoint (list of shard files) to a nested
    Flax params dict.

    BUG FIX: the obfuscated signature repeated `_UpperCamelCase` for both
    parameters (a SyntaxError) and erased the assignment targets and the
    per-shard loop scope; names and loop structure restored from the body's
    references (`shard_filenames`, `pt_state_dict`, ...).
    """
    import torch

    # accumulate converted weights across all shards
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file )
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]
            random_flax_state_dict = flatten_dict(flax_model_params )
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params )

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("." ) )

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                        F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor )
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor )
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key , None )
                    continue
                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor )
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor )

    return unflatten_dict(flax_state_dict )
def SCREAMING_SNAKE_CASE__( model , flax_checkpoint_path ):
    """Load a Flax checkpoint file into a PyTorch model.

    BUG FIX: the obfuscated signature repeated `_UpperCamelCase` (SyntaxError)
    and erased the assignment targets; the `getattr` looks up the Flax class on
    the `transformers` module imported at the top of this file.
    """
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path )
    logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )

    # import correct flax class
    flax_cls = getattr(transformers , "Flax" + model.__class__.__name__ )

    # load flax weight dict
    with open(flax_checkpoint_path , "rb" ) as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )

    return load_flax_weights_in_pytorch_model(model , flax_state_dict )
def SCREAMING_SNAKE_CASE__( pt_model , flax_state ):
    """Load a (nested) Flax parameter tree into a PyTorch model in place.

    BUG FIX: the obfuscated signature repeated `_UpperCamelCase` (SyntaxError),
    erased the assignment targets, and used the garbled dtype names
    `jnp.bfloataa` / `np.floataa` (restored to `jnp.bfloat16` / `np.float32`).

    Returns:
        The PyTorch model with its state dict replaced by the Flax weights.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model." )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )

    flax_state_dict = flatten_dict(flax_state )
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple ) not in pt_model_dict:
            # conv layer: Flax HWIO -> PyTorch OIHW
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:] )  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple )

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split("." )
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components )
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )

    pt_model.load_state_dict(pt_model_dict )

    # re-transform missing_keys to list
    missing_keys = list(missing_keys )

    if len(unexpected_keys ) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)." )
    else:
        logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )

    if len(missing_keys ) > 0:
        logger.warning(
            F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            " use it for predictions and inference." )
    else:
        logger.warning(
            F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
            "If your task is similar to the task the model of the checkpoint was trained on, "
            F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )

    return pt_model
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint into a PyTorch state dict on disk.

    BUG FIX: the obfuscated def was named `lowercase_` with three identical
    parameter names (a SyntaxError), yet the `__main__` guard below calls
    `convert_rembert_tf_checkpoint_to_pytorch(...)` — that name and distinct
    parameters are restored.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        rembert_config_file: JSON config describing the architecture.
        pytorch_dump_path: where to `torch.save` the converted state dict.
    """
    # Initialise PyTorch model from the JSON config
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_snake_case = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 283 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def lowercase_( config , has_lm_head=False , is_semantic=False ):
    """Build the (old_key, new_key) rename pairs that map an original BEiT
    checkpoint onto HuggingFace `Beit*` parameter names.

    BUG FIX: the obfuscated signature repeated `SCREAMING_SNAKE_CASE_` for all
    three parameters (a SyntaxError); names restored from the body's references
    (`config.num_hidden_layers`, `is_semantic`, `has_lm_head`).

    Args:
        config: model config providing `num_hidden_layers`.
        has_lm_head: True for masked-image-modeling checkpoints (keeps the LM
            head / mask token instead of the classification head).
        is_semantic: True for semantic-segmentation checkpoints whose keys are
            prefixed with "backbone.".

    Returns:
        List of `(source_key, target_key)` tuples.
    """
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
        rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
        rename_keys.append(
            (f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
        rename_keys.append(
            (f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
        rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
        rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
        rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
        rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
        rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
        rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"""{prefix}cls_token""", "beit.embeddings.cls_token"),
            (f"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"),
        ] )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ] )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )

    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each block's fused qkv projection into the separate query/key/value
    tensors expected by the HF BEiT layout, writing them back into ``state_dict``.

    Args:
        state_dict: original checkpoint state dict, modified in place.
        config: model config providing ``num_hidden_layers`` and ``hidden_size``.
        has_lm_head: unused here; kept for call-site compatibility (callers pass it).
        is_semantic: when True, original keys carry a ``backbone.`` prefix.
    """
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values are stored as one fused qkv matrix;
        # the original checkpoint has q/v biases but no key bias
        in_proj_weight = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""")
        q_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""")
        v_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""")

        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.bias"""] = q_bias
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.bias"""] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_a = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""")
        gamma_b = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""")
        state_dict[f"""beit.encoder.layer.{i}.lambda_1"""] = gamma_a
        state_dict[f"""beit.encoder.layer.{i}.lambda_2"""] = gamma_b
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place (checkpoint key rename)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO sanity-check image (two cats on a couch)."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read straight from the response file object
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original DiT checkpoint into the HF BEiT layout and save it.

    Args:
        checkpoint_url: URL of the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: local folder that receives model + processor.
        push_to_hub: when True, also push both artifacts to the Hub.
    """
    # "rvlcdip" checkpoints are classification fine-tunes; everything else is a
    # masked-image-modeling pretraining checkpoint (i.e. it has an LM head)
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=has_lm_head, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
_snake_case = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 283 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
snake_case : List[Any] = logging.get_logger(__name__)
# Map from canonical checkpoint name to its hosted config.json URL.
snake_case : Any = {
    '''microsoft/unispeech-sat-base-100h-libri-ft''': (
        '''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class _snake_case(PretrainedConfig):
    """Configuration class for UniSpeechSat models.

    NOTE(review): identifiers in this file were machine-mangled (upstream this
    class is ``UniSpeechSatConfig`` and the base was ``PretrainedConfig``, which
    is imported above). The original signature repeated one parameter name,
    which is a SyntaxError; parameter names are restored from the attribute
    assignments and upstream defaults.
    """

    # identifier used by the Auto* machinery
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # the three conv-layer descriptions must agree in length
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.'''
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Total stride of the conv feature extractor (product of conv strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 352 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
# Marker applied to integration tests in this module (they hit the Hub / network).
snake_case : Any = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    """inspect_dataset must copy the loading script into the target dir, without caches."""
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    """inspect_metric must copy the metric script into the target dir, without caches."""
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    """Config info must report the requested config name and its split names."""
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    """A multi-config dataset without an explicit config name must raise."""
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    """The expected config name must be among those reported for the dataset."""
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    """get_dataset_infos must list every config and the splits of the first one."""
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    """The chosen config's info must expose the expected split names."""
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    """Requesting split names of a multi-config dataset without a config must raise."""
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 281 | 0 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowercase__(DiffusionPipeline):
    """Value-guided planning pipeline (upstream: ``ValueGuidedRLPipeline``).

    Samples state-action trajectories with a diffusion UNet and steers the
    denoising with gradients of a learned value function.

    NOTE(review): identifiers were machine-mangled; the class name is kept, the
    base class is restored from the imports above, and method names are
    restored from their ``self.*`` call sites.
    """

    def __init__(self, value_function, unet, scheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()

        # per-key normalization statistics; non-numeric dataset entries are skipped
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        """Standardize ``x_in`` with the dataset statistics stored for ``key``."""
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        """Invert :meth:`normalize` for ``key``."""
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        """Recursively convert/move ``x_in`` to tensors on the UNet's device."""
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_xa(self, x_in, cond, act_dim):
        """Write the conditioned state values back into the trajectory in place."""
        for key, val in cond.items():
            # key indexes a timestep; columns past act_dim hold the state part
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        """Denoise ``x`` while nudging it along the value-function gradient."""
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                # do not guide the final (low-noise) steps
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_xa(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_xa(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_xa(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
| 176 |
def solution() -> int:
    """Project Euler 40: product of the digits d_1*d_10*...*d_1000000 of
    Champernowne's constant 0.123456789101112...

    Returns:
        The product of the seven decimal digits at positions 10**0..10**6.
    """
    constant = []
    i = 1

    # concatenating the first 10**6 integers yields well over 10**6 digits,
    # so index 999999 is always valid after the join
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
| 176 | 1 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
# Module logger used below to emit NSFW / watermark warnings.
__A : Union[str, Any] = logging.get_logger(__name__)
class __UpperCamelCase(PreTrainedModel):
    """Safety checker for the IF pipeline: flags NSFW and watermarked images
    and blacks them out.

    NOTE(review): identifiers were machine-mangled (upstream: ``IFSafetyChecker``);
    the class name is kept, the base and the attribute names required by the
    ``PreTrainedModel`` machinery are restored from the imports above.
    """

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        # two scalar heads on top of the CLIP image embedding
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)  # NSFW logit
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)  # watermark logit

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        """Return ``(images, nsfw_detected, watermark_detected)``; flagged images
        are replaced by all-zero (black) arrays of the same shape."""
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 57 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
# Expose the builtin `open` as a module attribute so patch_submodule() has a builtin target to patch.
__A : Any = open  # noqa: we just need to have a builtin inside this module to test it properly
| 57 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : Dict = (PNDMScheduler,)
__lowerCAmelCase : int = (('num_inference_steps', 50),)
def SCREAMING_SNAKE_CASE ( self , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Tuple = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase : Any = kwargs.pop("""num_inference_steps""" , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = self.dummy_sample
UpperCAmelCase : Dict = 0.1 * sample
UpperCAmelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase : str = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
UpperCAmelCase : Tuple = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
UpperCAmelCase : Union[str, Any] = dummy_past_residuals[:]
UpperCAmelCase : int = scheduler.step_prk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase : str = new_scheduler.step_prk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase : int = scheduler.step_plms(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase : List[Any] = new_scheduler.step_plms(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = dict(self.forward_default_kwargs )
UpperCAmelCase : Tuple = kwargs.pop("""num_inference_steps""" , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = self.dummy_sample
UpperCAmelCase : Tuple = 0.1 * sample
UpperCAmelCase : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase : str = self.get_scheduler_config()
UpperCAmelCase : Tuple = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase : Any = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase : Dict = dummy_past_residuals[:]
UpperCAmelCase : Dict = scheduler.step_prk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase : Optional[Any] = new_scheduler.step_prk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase : Tuple = scheduler.step_plms(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase : List[Any] = new_scheduler.step_plms(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE ( self , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] = self.scheduler_classes[0]
UpperCAmelCase : List[Any] = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = 10
UpperCAmelCase : Optional[int] = self.dummy_model()
UpperCAmelCase : Any = self.dummy_sample_deter
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = scheduler.step_prk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = scheduler.step_plms(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
return sample
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : str = dict(self.forward_default_kwargs )
UpperCAmelCase : Any = kwargs.pop("""num_inference_steps""" , _SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase : str = self.get_scheduler_config()
UpperCAmelCase : Tuple = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = self.dummy_sample
UpperCAmelCase : str = 0.1 * sample
if num_inference_steps is not None and hasattr(_SCREAMING_SNAKE_CASE , """set_timesteps""" ):
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(_SCREAMING_SNAKE_CASE , """set_timesteps""" ):
UpperCAmelCase : str = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase : Any = dummy_past_residuals[:]
UpperCAmelCase : Tuple = scheduler.step_prk(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase : Optional[int] = scheduler.step_prk(_SCREAMING_SNAKE_CASE , 1 , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase : Dict = scheduler.step_plms(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase : Tuple = scheduler.step_plms(_SCREAMING_SNAKE_CASE , 1 , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = self.scheduler_classes[0]
UpperCAmelCase : int = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase : Optional[int] = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase : Tuple = self.dummy_sample
UpperCAmelCase : str = 0.1 * sample
UpperCAmelCase : Dict = self.get_scheduler_config()
UpperCAmelCase : Union[str, Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase : Dict = scheduler.step_prk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
def test_inference_plms_no_past(self):
    """Calling step_plms before set_timesteps must raise (no PRK history yet)."""
    with self.assertRaises(ValueError):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
def test_full_loop_no_noise(self):
    """Full denoising loop with default settings must reproduce reference statistics."""
    sample = self.full_loop()
    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_sum.item() - 198.1318) < 1e-2
    assert abs(result_mean.item() - 0.2580) < 1e-3
def test_full_loop_with_v_prediction(self):
    """Full loop under v-prediction parameterization must match reference values."""
    sample = self.full_loop(prediction_type="v_prediction")
    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_sum.item() - 67.3986) < 1e-2
    assert abs(result_mean.item() - 0.0878) < 1e-3
def test_full_loop_with_set_alpha_to_one(self):
    """Full loop with set_alpha_to_one=True; expected values distinguish it from the False case below."""
    # We specify different beta, so that the first alpha is 0.99
    sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_sum.item() - 230.0399) < 1e-2
    assert abs(result_mean.item() - 0.2995) < 1e-3
def test_full_loop_with_no_set_alpha_to_one(self):
    """Full loop with set_alpha_to_one=False; lower reference sum than the True case above."""
    # We specify different beta, so that the first alpha is 0.99
    sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_sum.item() - 186.9482) < 1e-2
    assert abs(result_mean.item() - 0.2434) < 1e-3
| 109 | """simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowercase(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Optimize *search_prob* via simulated annealing and return the best state found.

    search_prob     -- a SearchProblem-like object exposing score(), get_neighbors(), x, y
    find_max        -- maximize the score when True, minimize when False
    max_x..min_y    -- rectangular domain bounds; neighbors outside are skipped
    visualization   -- plot score per iteration with matplotlib when True
    start_temperate -- initial temperature; cools by *rate_of_decrease* each iteration
    threshold_temp  -- stop once the temperature falls below this value
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('''Iterations''')
        plt.ylabel('''Function values''')
        plt.show()
    return best_state


# Backward-compatible public alias; the demo below calls it by this name.
simulated_annealing = lowercase
if __name__ == "__main__":

    def test_fa(x, y):
        """Quadratic bowl f(x, y) = x^2 + y^2."""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        # fix: report the maximization result, not the earlier minimum
        f"""and 50 > y > - 5 found via hill climbing: {local_max.score()}"""
    )

    def test_fa(x, y):
        """f(x, y) = 3x^2 - 6y (rebinds the optimizee for the second demo)."""
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        """The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
        f"""{local_min.score()}"""
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        """The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
        f"""{local_max.score()}"""
    )
| 256 | 0 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# English letter frequencies in percent; the code below references these by
# name (english_letter_freq, ETAOIN, LETTERS), so they must not share a binding.
english_letter_freq = {
    'E': 12.70,
    'T': 9.06,
    'A': 8.17,
    'O': 7.51,
    'I': 6.97,
    'N': 6.75,
    'S': 6.33,
    'H': 6.09,
    'R': 5.99,
    'D': 4.25,
    'L': 4.03,
    'C': 2.78,
    'U': 2.76,
    'M': 2.41,
    'W': 2.36,
    'F': 2.23,
    'G': 2.02,
    'Y': 1.97,
    'P': 1.93,
    'B': 1.29,
    'V': 0.98,
    'K': 0.77,
    'J': 0.15,
    'X': 0.15,
    'Q': 0.10,
    'Z': 0.07,
}
# All 26 letters ordered from most to least frequent in English text.
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message: str) -> dict:
    """Return a dict mapping each uppercase A-Z letter to its count in *message*.

    Counting is case-insensitive; non-letter characters are ignored.
    """
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        # membership in the pre-built dict is equivalent to checking A-Z
        if letter in letter_count:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x: tuple):
    """Sort key helper: return the first element of a pair."""
    return x[0]
def get_frequency_order(message: str) -> str:
    """Return the 26 letters ordered from most to least frequent in *message*.

    Ties are broken by reverse ETAOIN order (the classic frequency-analysis recipe),
    so the result is deterministic for any input.
    """
    letter_to_freq = get_letter_count(message)
    freq_to_letter = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        # fix: group the *letter* under its frequency (was appending the message)
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = """""".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    # sort frequency groups from highest to lowest count
    freq_pairs.sort(key=lambda pair: pair[0], reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score(message: str) -> int:
    """Score 0-12: how closely *message*'s letter frequencies resemble English.

    One point for each of English's six most common letters found among the
    message's six most common, plus one for each of the six least common.
    """
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 353 | """simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __A ( AbstractDatasetReader ):
    """Dataset reader that materializes (or streams) a Spark DataFrame via the Spark builder."""

    def __init__(
        self,
        df,
        split = None,
        features = None,
        streaming = True,
        cache_dir = None,
        keep_in_memory = False,
        working_dir = None,
        load_from_cache_file = True,
        file_format = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs,
        )
        # fix: these were assigned to throwaway locals; read() below needs them on self
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs,
        )

    def read(self):
        """Build the dataset (streaming or cached) and return it for self.split."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # re-download unless the caller asked to reuse the cache
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format, )
        return self.builder.as_dataset(split=self.split)
| 126 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
lowercase_ : Union[str, Any] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowercase_ : str = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def get_test_pipeline(self, model, tokenizer, processor):
    """Build a text2text pipeline plus example inputs for the shared pipeline tests."""
    # fix: the three parameters all shared one junk name (a SyntaxError)
    generator = TextaTextGenerationPipeline(model=model, tokenizer=tokenizer)
    return generator, ["Something to write", "Something else"]
def run_pipeline_test(self, generator, _):
    """Exercise single, batched and invalid inputs on a text2text generator."""
    outputs = generator('Something there')
    self.assertEqual(outputs, [{'generated_text': ANY(str)}])
    # These are encoder decoder, they don't just append to incoming string
    self.assertFalse(outputs[0]['generated_text'].startswith('Something there'))
    # do_sample=True is required for num_return_sequences > 1
    outputs = generator(['This is great !', 'Something else'], num_return_sequences=2, do_sample=True)
    self.assertEqual(
        outputs, [
            [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
            [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
        ], )
    outputs = generator(
        ['This is great !', 'Something else'], num_return_sequences=2, batch_size=2, do_sample=True)
    self.assertEqual(
        outputs, [
            [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
            [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
        ], )
    with self.assertRaises(ValueError):
        generator(4)
@require_torch
def test_small_model_pt(self):
    """End-to-end checks against a tiny random T5 checkpoint on PyTorch."""
    generator = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random', framework='pt')
    # do_sample=False necessary for reproducibility
    outputs = generator('Something there', do_sample=False)
    self.assertEqual(outputs, [{'generated_text': ''}])
    num_return_sequences = 3
    outputs = generator(
        'Something there', num_return_sequences=num_return_sequences, num_beams=num_return_sequences, )
    target_outputs = [
        {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
        {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
        {'generated_text': ''},
    ]
    self.assertEqual(outputs, target_outputs)
    outputs = generator('This is a test', do_sample=True, num_return_sequences=2, return_tensors=True)
    self.assertEqual(
        outputs, [
            {'generated_token_ids': ANY(torch.Tensor)},
            {'generated_token_ids': ANY(torch.Tensor)},
        ], )
    # pad with EOS so batched generation of unequal lengths works
    generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
    generator.tokenizer.pad_token = '<pad>'
    outputs = generator(
        ['This is a test', 'This is a second test'], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, )
    self.assertEqual(
        outputs, [
            [
                {'generated_token_ids': ANY(torch.Tensor)},
                {'generated_token_ids': ANY(torch.Tensor)},
            ],
            [
                {'generated_token_ids': ANY(torch.Tensor)},
                {'generated_token_ids': ANY(torch.Tensor)},
            ],
        ], )
@require_tf
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : int = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
# do_sample=False necessary for reproducibility
A_ : List[Any] = generator('Something there' , do_sample=snake_case_ )
self.assertEqual(snake_case_ , [{'generated_text': ''}] ) | 286 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import wiring: build the structure dict first, then hand it to _LazyModule.
# fix: every optional-dependency branch used to REBIND the dict (losing prior
# entries) and the final _LazyModule call referenced an undefined name.
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_xlnet'] = [
        'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XLNetForMultipleChoice',
        'XLNetForQuestionAnswering',
        'XLNetForQuestionAnsweringSimple',
        'XLNetForSequenceClassification',
        'XLNetForTokenClassification',
        'XLNetLMHeadModel',
        'XLNetModel',
        'XLNetPreTrainedModel',
        'load_tf_weights_in_xlnet',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_xlnet'] = [
        'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXLNetForMultipleChoice',
        'TFXLNetForQuestionAnsweringSimple',
        'TFXLNetForSequenceClassification',
        'TFXLNetForTokenClassification',
        'TFXLNetLMHeadModel',
        'TFXLNetMainLayer',
        'TFXLNetModel',
        'TFXLNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# fix: both constants were bound to the same junk name, so the logger was clobbered.
logger = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config files.
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowercase__ ( PretrainedConfig ):
    """Configuration for WavLM models.

    fix: the degraded version gave every __init__ parameter the same name
    (a SyntaxError) and stored values in throwaway locals instead of on self,
    although the validation below and the property read self.* attributes.
    """

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def UpperCAmelCase__(self):
        # total stride of the feature extractor: product of the conv strides
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 209 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
# Lazy-import wiring for the onnx subpackage.
# fix: the structure dict was bound to a junk name while _LazyModule received
# the undefined `_import_structure`, and the eager import of the Seq2Seq config
# used a misspelled name that contradicts the structure entry.
_import_structure = {
    'config': [
        'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
        'OnnxConfig',
        'OnnxConfigWithPast',
        'OnnxSeq2SeqConfigWithPast',
        'PatchingSpec',
    ],
    'convert': ['export', 'validate_model_outputs'],
    'features': ['FeaturesManager'],
    'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 209 | 1 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase :
    """Builds a tiny BioGPT config plus dummy inputs and runs shape/behaviour checks.

    fix: the degraded version gave all parameters of each method the same name
    (a SyntaxError), collapsed every method name to one identifier (so only the
    last survived) and never stored constructor arguments on self, although the
    test case below calls these methods and attributes by their canonical names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a config for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, )
        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["""last_hidden_state"""]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)["""last_hidden_state"""]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)["""last_hidden_state"""]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[
            """last_hidden_state"""
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict


# Public alias matching the name the test case below instantiates.
BioGptModelTester = __lowercase
@require_torch
class __lowercase (UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_snake_case = (BioGptForCausalLM,) if is_torch_available() else ()
_snake_case = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case = False
def setUp(self):
    """Wire up the shared model/config testers used by the test methods below."""
    # fix: these were assigned to throwaway locals; the other tests read them off self
    self.model_tester = BioGptModelTester(self)
    self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
def test_config(self):
    """Run the generic configuration round-trip checks."""
    self.config_tester.run_common_tests()
def test_biogpt_model(self):
    """Basic forward-pass shape check."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs)
def test_biogpt_model_various_embeddings(self):
    """Forward pass across the supported position-embedding types."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
        # NOTE(review): the degraded original only rebound a local to the loop
        # variable; setting position_embedding_type on the config matches the
        # standard transformers test template — confirm against upstream.
        config_and_inputs[0].position_embedding_type = embedding_type
        self.model_tester.create_and_check_model(*config_and_inputs)
def test_biogpt_model_att_mask_past(self):
    """Cached generation must match full forward under a partial attention mask."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)
def test_biogpt_gradient_checkpointing(self):
    """Forward/backward must work with gradient checkpointing enabled."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)
def UpperCAmelCase ( self ) -> int:
snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*A )
def UpperCAmelCase ( self ) -> int:
snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*A )
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*A )
@slow
def UpperCAmelCase ( self ) -> Tuple:
snake_case : List[Any] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(A )
snake_case : Union[str, Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
snake_case : Any = """left"""
# Define PAD Token = EOS Token = 50256
snake_case : Union[str, Any] = tokenizer.eos_token
snake_case : Optional[int] = model.config.eos_token_id
# use different length sentences to test batching
snake_case : Dict = [
"""Hello, my dog is a little""",
"""Today, I""",
]
snake_case : Tuple = tokenizer(A , return_tensors="""pt""" , padding=A )
snake_case : Any = inputs["""input_ids"""].to(A )
snake_case : Any = model.generate(
input_ids=A , attention_mask=inputs["""attention_mask"""].to(A ) , )
snake_case : str = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(A )
snake_case : Optional[int] = model.generate(input_ids=A )
snake_case : Union[str, Any] = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
snake_case : str = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(A )
snake_case : Tuple = model.generate(input_ids=A , max_length=model.config.max_length - num_paddings )
snake_case : Tuple = tokenizer.batch_decode(A , skip_special_tokens=A )
snake_case : Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A )
snake_case : Optional[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=A )
snake_case : Any = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(A , A )
self.assertListEqual(A , [non_padded_sentence, padded_sentence] )
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Any = BioGptModel.from_pretrained(A )
self.assertIsNotNone(A )
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case , snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Union[str, Any] = 3
snake_case : List[Any] = input_dict["""input_ids"""]
snake_case : str = input_ids.ne(1 ).to(A )
snake_case : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case : Optional[int] = BioGptForSequenceClassification(A )
model.to(A )
model.eval()
snake_case : str = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : str = 3
snake_case : Union[str, Any] = """multi_label_classification"""
snake_case : Any = input_dict["""input_ids"""]
snake_case : Tuple = input_ids.ne(1 ).to(A )
snake_case : Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case : Union[str, Any] = BioGptForSequenceClassification(A )
model.to(A )
model.eval()
snake_case : Union[str, Any] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __lowercase (unittest.TestCase ):
    """Slow integration tests that run the real ``microsoft/biogpt`` checkpoint.

    NOTE(review): locals here are mangled (all bound to ``snake_case``) while the
    bodies read ``model``/``tokenizer``/``output``/``A`` — restore real names from
    the upstream test file before running.
    """
    @slow
    def UpperCAmelCase ( self ) -> Optional[Any]:
        # Compare a small logits slice against reference values from the original model.
        snake_case : Optional[Any] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
        snake_case : Union[str, Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
        snake_case : str = model(A )[0]
        snake_case : Optional[int] = 4_2_3_8_4
        snake_case : Dict = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape , A )
        snake_case : Union[str, Any] = torch.tensor(
            [[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , A , atol=1e-4 ) )
    @slow
    def UpperCAmelCase ( self ) -> Any:
        # Deterministic beam-search generation must reproduce the expected sentence.
        snake_case : Optional[Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
        snake_case : str = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
        model.to(A )
        torch.manual_seed(0 )
        snake_case : Any = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(A )
        snake_case : List[str] = model.generate(
            **A , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=A , )
        snake_case : Any = tokenizer.decode(output_ids[0] , skip_special_tokens=A )
        snake_case : Dict = (
            """COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
            """ causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
            """ territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
            """ and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
            """ more than 800,000 deaths."""
        )
        self.assertEqual(A , A )
| 124 |
def SCREAMING_SNAKE_CASE__ ( ) -> list[list[int]]:
    """Build a 1000 x 2000 grid whose rows and columns are strictly decreasing."""
    # Row i starts at 1000 - i and counts down 2000 consecutive integers, so each
    # row (and each column) is strictly decreasing.
    return [list(range(top , top - 2000 , -1 ) ) for top in range(1000 , 0 , -1 )]
# Fix: both module constants previously shared one mangled name (the second
# assignment clobbered the first) and the tuple referenced an undefined `grid`.
# NOTE(review): the generator above was renamed by the mangling; this assumes it
# is `generate_large_matrix` — confirm against the original module.
grid = generate_large_matrix()
# Hand-written edge-case grids plus the large generated grid, used by the tests.
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> None:
    """Assert that every row and every column of *lowercase* is sorted decreasingly.

    Fix: the mangled version sorted the whole grid with ``reverse=<grid>``; each
    row/column must be compared against its own descending sort.
    """
    assert all(row == sorted(row , reverse=True ) for row in lowercase )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*lowercase ) )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
    """Return the index of the first negative number in a decreasingly sorted array.

    Returns ``len(lowercase)`` when no element is negative, and 0 for an empty
    array or one that starts negative.  Fix: the mangled body read undefined
    names (``array``/``left``/``right``/``num``); all uses now target the
    parameter and real locals.
    """
    left = 0
    right = len(lowercase ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not lowercase or lowercase[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = lowercase[mid]
        # num must be negative and its left neighbour non-negative: boundary found.
        if num < 0 and lowercase[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 (its length).
    return len(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
    """Count negatives in a grid sorted decreasingly by rows and columns.

    Because both rows and columns decrease, the first-negative index can only
    move left (or stay) from one row to the next, so the binary-search slice
    shrinks as we descend.  Fix: the mangled body read undefined ``total``/
    ``bound`` and the global ``grid`` instead of the parameter.
    """
    total = 0
    bound = len(lowercase[0] )
    for i in range(len(lowercase ) ):
        # Index of the first negative within the still-possible prefix of this row.
        bound = find_negative_index(lowercase[i][:bound] )
        total += bound
    return (len(lowercase ) * len(lowercase[0] )) - total
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
    """Count the negative numbers in *lowercase* by scanning every cell.

    Fix: the mangled body iterated the global ``grid`` instead of the parameter.
    """
    return len([number for row in lowercase for number in row if number < 0] )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
    """Count negatives per row, stopping at the first negative of each row.

    Rows are sorted decreasingly, so everything after the first negative is also
    negative.  Fix: the mangled body read undefined ``total``, iterated the
    global ``grid`` and measured ``len`` of the wrong object.
    """
    total = 0
    for row in lowercase:
        for i, number in enumerate(row ):
            if number < 0:
                # Everything from position i to the end of the row is negative.
                total += len(row ) - i
                break
    return total
def SCREAMING_SNAKE_CASE__ ( ) -> None:
    """Time the three counting strategies against the large module-level grid.

    Fix: the mangled version passed an undefined name as the ``timeit`` setup
    string; the setup is now bound to a real local.
    """
    from timeit import timeit

    print("""Running benchmarks""" )
    setup = (
        """from __main__ import count_negatives_binary_search, """
        """count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"""{func}(grid=grid)""" , setup=setup , number=500 )
        print(f"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
    # Run the module doctests, then time the three counting strategies.
    import doctest
    doctest.testmod()
    # NOTE(review): `benchmark` is not bound under this name in the mangled
    # module (the function above was renamed) — confirm against the original.
    benchmark()
| 124 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __UpperCAmelCase( unittest.TestCase , SCREAMING_SNAKE_CASE__ ):
    """Exercises the ``text-classification`` tool both locally and via its remote endpoint.

    Fixes over the mangled version: the tools are stored on ``self`` (the other
    tests read ``self.tool``/``self.remote_tool``), ``remote=True`` is passed
    explicitly, and each call's result is captured before being asserted.
    """

    def UpperCAmelCase_ ( self ):
        """Load the local tool (running its setup) and the remote counterpart."""
        self.tool = load_tool("text-classification" )
        self.tool.setup()
        self.remote_tool = load_tool("text-classification" , remote=True )

    def UpperCAmelCase_ ( self ):
        """Positional-argument call on the local tool."""
        result = self.tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(result , "positive" )

    def UpperCAmelCase_ ( self ):
        """Positional-argument call on the remote tool."""
        result = self.remote_tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(result , "positive" )

    def UpperCAmelCase_ ( self ):
        """Keyword-argument call on the local tool."""
        result = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(result , "positive" )

    def UpperCAmelCase_ ( self ):
        """Keyword-argument call on the remote tool."""
        result = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(result , "positive" )
| 365 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
a : Dict = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
a : List[Any] = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
a : int = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase( datasets.Metric ):
    """Spearman rank-order correlation metric wrapping ``scipy.stats.spearmanr``.

    Fixes over the mangled version: ``_compute`` previously declared three
    parameters with the same name (a SyntaxError) and read undefined locals.
    """

    def UpperCAmelCase_ ( self ):
        """Declare the metric's input features and reference URL."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("float" ),
                    "references": datasets.Value("float" ),
                } ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )

    def UpperCAmelCase_ ( self , predictions , references , return_pvalue=False ):
        """Compute the Spearman correlation (and optionally its p-value)."""
        results = spearmanr(predictions , references )
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 150 | 0 |
'''simple docstring'''
def lowerCamelCase ( pointa , pointb ) -> float:
    """Return the squared Euclidean distance between two 2-D points.

    Fix: the mangled signature declared both parameters under one name
    (a SyntaxError) and the body read an undefined identifier.
    """
    return (pointb[0] - pointa[0]) ** 2 + (pointb[1] - pointa[1]) ** 2
def lowerCamelCase ( data , column=0 ):
    """Return *data* sorted by the given tuple index (x by default, y for column=1).

    Fix: the mangled signature had duplicate parameter names and the sort key
    referenced undefined identifiers.
    """
    return sorted(data , key=lambda point: point[column] )
def lowerCamelCase ( points , points_counts , min_dis=float("inf" ) ) -> float:
    """Brute-force the minimum squared distance among the first *points_counts* points.

    Fix: the mangled signature declared all three parameters under one name and
    the body read undefined locals.
    """
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def lowerCamelCase ( points , points_counts , min_dis=float("inf" ) ) -> float:
    """Minimum squared distance inside the cross strip (points sorted on y).

    Each point only needs to be compared with its 6 predecessors in y-order.
    Fix: the mangled signature had three identical parameter names and the body
    read undefined locals.
    """
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def lowerCamelCase ( points_sorted_on_x , points_sorted_on_y , points_counts ) -> float:
    """Divide-and-conquer closest pair: return the minimum squared distance.

    Fix: the mangled version had duplicate parameter names and read undefined
    locals (``mid``, ``closest_pair_dis``, ``cross_strip``...).
    """
    # base case: brute-force tiny inputs.
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion on the two halves.
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    # Points within `closest_pair_dis` of the dividing vertical line may pair
    # across the split; collect them and re-check inside the strip.
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def lowerCamelCase ( points , points_counts ) -> float:
    """Return the (non-squared) distance of the closest pair among *points*.

    Fix: the mangled signature had duplicate parameter names and the body read
    undefined locals.
    """
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
if __name__ == "__main__":
    # Fix: the sample list must be bound to `points` — the print below reads it.
    # NOTE(review): `closest_pair_of_points` was renamed by the mangling above;
    # confirm the callable name against the original module.
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
| 239 | '''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def lowerCamelCase ( repo_id , path , revision ) -> None:
    """Check hf_hub_url builds the resolve URL, URL-quoting the path and defaulting revision to 'main'.

    Fix: the mangled signature declared all three parameters under one name
    (a SyntaxError) and the body read undefined identifiers.
    """
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}'''
| 239 | 1 |
from __future__ import annotations
from collections.abc import Generator
def __a ( ) -> Generator[int, None, None]:
'''simple docstring'''
UpperCAmelCase_= {}
UpperCAmelCase_= 2
while True:
UpperCAmelCase_= factor_map.pop(lowerCAmelCase_ ,lowerCAmelCase_ )
if factor:
UpperCAmelCase_= factor + prime
while x in factor_map:
x += factor
UpperCAmelCase_= factor
else:
UpperCAmelCase_= prime
yield prime
prime += 1
def __a ( lowerCAmelCase_ : float = 1E10 ) -> int:
    """Return the smallest odd n for which 2 * prime * n exceeds *lowerCAmelCase_*.

    Walks the prime stream two at a time (every other prime is skipped, see the
    inline note).  Fix: the mangled version collapsed all locals onto one name
    and called ``next`` on the limit instead of the prime generator.
    NOTE(review): ``sieve`` was renamed by the mangling above — confirm the
    callable name against the original module.
    """
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > lowerCAmelCase_:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes )
        n += 2
if __name__ == "__main__":
    # NOTE(review): `solution` is not bound under this name here (the defs above
    # were renamed to `__a` by the mangling) — confirm against the original.
    print(solution())
| 277 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def __a ( lowerCAmelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_= [
"""decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase_ ,lowerCAmelCase_ )
def __a ( lowerCAmelCase_ : Any ) -> str:
'''simple docstring'''
UpperCAmelCase_, UpperCAmelCase_= emb.weight.shape
UpperCAmelCase_= nn.Linear(lowerCAmelCase_ ,lowerCAmelCase_ ,bias=lowerCAmelCase_ )
UpperCAmelCase_= emb.weight.data
return lin_layer
def __a ( lowerCAmelCase_ ):
    """Convert a fairseq XGLM checkpoint at path *lowerCAmelCase_* into a HF ``XGLMForCausalLM``.

    Fix: the mangled version reused one local name for everything, passed the
    checkpoint path to ``remove_ignore_keys_`` and ``strict=``, and never
    attached the rebuilt LM head to the model.
    """
    checkpoint = torch.load(lowerCAmelCase_ , map_location="""cpu""" )
    args = Namespace(**checkpoint["""cfg"""]["""model"""] )
    state_dict = checkpoint["""model"""]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    # fairseq prefixes decoder weights with "decoder."; HF expects "model.".
    state_dict = {key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size ,max_position_embeddings=args.max_target_positions ,num_layers=args.decoder_layers ,attention_heads=args.decoder_attention_heads ,ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.decoder_embed_dim ,layerdrop=args.decoder_layerdrop ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function="""gelu""" ,scale_embedding=not args.no_scale_embedding ,tie_word_embeddings=args.share_decoder_input_output_embed ,)
    model = XGLMForCausalLM(config )
    # strict=False: the output projection is rebuilt below instead of loaded.
    missing = model.load_state_dict(state_dict , strict=False )
    print(missing )
    model.lm_head = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
    # Fix: parser/args/model previously all shared one mangled name (`__A`),
    # leaving `parser` and `args` undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    # NOTE(review): the converter above was renamed by the mangling; confirm the
    # callable name against the original module.
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 277 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Lazy-import structure: module name -> public symbols it provides.
UpperCAmelCase__ = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
    import sys

    # Fix: the import structure was referenced under an undefined name, and the
    # lazy proxy must be installed in sys.modules (previously the result was
    # assigned to a throwaway variable, leaving `import sys` dead).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], UpperCAmelCase__, module_spec=__spec__)
| 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Fix: the three module constants previously shared one mangled name, clobbering
# each other; the tokenizer class below reads VOCAB_FILES_NAMES and
# PRETRAINED_VOCAB_FILES_MAP, and the logger was lost entirely.
logger = logging.get_logger(__name__)
# File names the fast tokenizer expects on disk.
VOCAB_FILES_NAMES = {'''tokenizer_file''': '''tokenizer.json'''}
# Hub locations of the pretrained tokenizer files for the BLOOM family.
PRETRAINED_VOCAB_FILES_MAP = {
    '''tokenizer_file''': {
        '''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
        '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
        '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
        '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
        '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
        '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
        '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
    },
}
class __UpperCAmelCase (_UpperCAmelCase ):
    # Fast (tokenizers-backed) BLOOM tokenizer.
    # NOTE(review): the base class and the class-constant references were
    # mangled — confirm they resolve to PreTrainedTokenizerFast and the module
    # constants before running.
    __snake_case : Tuple = VOCAB_FILES_NAMES
    __snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    __snake_case : Optional[Any] = ["input_ids", "attention_mask"]
    __snake_case : Optional[int] = None
    def __init__( self: Dict , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: str=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int="<unk>" , UpperCAmelCase_: List[str]="<s>" , UpperCAmelCase_: Tuple="</s>" , UpperCAmelCase_: List[Any]="<pad>" , UpperCAmelCase_: Dict=False , UpperCAmelCase_: Dict=False , **UpperCAmelCase_: Dict , ):
        """Build the fast tokenizer and sync the pre-tokenizer's add_prefix_space flag."""
        super().__init__(
            UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ , **UpperCAmelCase_ , )
        # Rebuild the backend pre-tokenizer if its add_prefix_space disagrees
        # with the requested setting.
        _SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , UpperCAmelCase_ ) != add_prefix_space:
            _SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase_ , pre_tok_state.pop("""type""" ) )
            _SCREAMING_SNAKE_CASE = add_prefix_space
            _SCREAMING_SNAKE_CASE = pre_tok_class(**UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = add_prefix_space
    def UpperCamelCase ( self: List[str] , *UpperCAmelCase_: Any , **UpperCAmelCase_: List[Any] ):
        """Batch encode; pretokenized input requires add_prefix_space=True."""
        _SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
                """ pretokenized inputs.""" )
        return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
    def UpperCamelCase ( self: Union[str, Any] , *UpperCAmelCase_: Dict , **UpperCAmelCase_: Union[str, Any] ):
        """Single encode; pretokenized input requires add_prefix_space=True."""
        _SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
                """ pretokenized inputs.""" )
        return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
    def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
        """Save the backend tokenizer model files; returns the written file names."""
        _SCREAMING_SNAKE_CASE = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
        return tuple(UpperCAmelCase_ )
    def UpperCamelCase ( self: Tuple , UpperCAmelCase_: "Conversation" ):
        """Flatten a Conversation into input_ids, truncating to model_max_length from the left."""
        _SCREAMING_SNAKE_CASE = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) + [self.eos_token_id] )
        if len(UpperCAmelCase_ ) > self.model_max_length:
            _SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
        return input_ids
| 306 | 0 |
'''simple docstring'''
def _A ( snake_case ) -> int:
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
_lowercase : Any = 1
_lowercase : str = 1
while repunit:
_lowercase : Tuple = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def _A ( snake_case = 1_00_00_00 ) -> int:
    """Return the least odd divisor whose least divisible repunit index exceeds *snake_case*.

    Fix: the mangled body read undefined names (``limit``/``divisor``).
    NOTE(review): ``least_divisible_repunit`` was renamed by the mangling above
    — confirm the callable name against the original module.
    """
    divisor = snake_case - 1
    # Only odd candidates can divide a repunit; start from the first odd >= limit-1.
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= snake_case:
        divisor += 2
    return divisor
if __name__ == "__main__":
    # NOTE(review): `solution` is not bound under this name in this mangled
    # module (the defs above were renamed to `_A`) — confirm against the original.
    print(F'''{solution() = }''')
| 199 |
'''simple docstring'''
def _A ( snake_case , snake_case ) -> int:
return int((input_a, input_a).count(0 ) != 0 )
def _A ( ) -> None:
    """Sanity-check the full NAND truth table."""
    for input_a, input_b, expected in ((0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 0)):
        assert nand_gate(input_a, input_b) == expected
if __name__ == "__main__":
    # Print the full NAND truth table.
    # NOTE(review): `nand_gate` is not bound under this name in this mangled
    # module (the gate above was renamed to `_A`) — confirm against the original.
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
| 199 | 1 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: both module constants previously shared one mangled name, so the archive
# map clobbered the logger.
logger = logging.get_logger(__name__)
# Hub locations of the pretrained EfficientFormer config files.
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """snap-research/efficientformer-l1-300""": (
        """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
    ),
}
class lowerCamelCase_ (snake_case__ ):
    '''Configuration for the EfficientFormer model.

    Fixes over the mangled version: every ``__init__`` parameter was declared
    under the single name ``A`` (a SyntaxError) while the body read the real
    names; the parameters are restored, ``**kwargs`` is forwarded to the base
    config, and the ``num_metaad_blocks`` typo is corrected.
    '''
    # NOTE(review): this class attribute is presumably ``model_type`` upstream;
    # its mangled name is kept here to avoid changing the visible interface.
    __UpperCamelCase: int = "efficientformer"
    def __init__( self , depths: List[int] = [3, 2, 6, 4] , hidden_sizes: List[int] = [48, 96, 224, 448] , downsamples: List[bool] = [True, True, True, True] , dim: int = 448 , key_dim: int = 32 , attention_ratio: int = 4 , resolution: int = 7 , num_hidden_layers: int = 5 , num_attention_heads: int = 8 , mlp_expansion_ratio: int = 4 , hidden_dropout_prob: float = 0.0 , patch_size: int = 16 , num_channels: int = 3 , pool_size: int = 3 , downsample_patch_size: int = 3 , downsample_stride: int = 2 , downsample_pad: int = 1 , drop_path_rate: float = 0.0 , num_meta3d_blocks: int = 1 , distillation: bool = True , use_layer_scale: bool = True , layer_scale_init_value: float = 1E-5 , hidden_act: str = "gelu" , initializer_range: float = 0.02 , layer_norm_eps: float = 1E-12 , image_size: int = 224 , batch_norm_eps: float = 1E-05 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 31 | '''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class lowerCamelCase_ (unittest.TestCase ):
    '''Holds the hyper-parameters shared by the Vivit image-processor tests.

    Fixes over the mangled version: every ``__init__`` parameter was declared
    under the single name ``A`` (a SyntaxError) while the body read the real
    names; the parameters are restored.
    '''
    def __init__( self , parent , batch_size=7 , num_channels=3 , num_frames=10 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , crop_size=None , ):
        # Default resize/crop targets when the caller does not override them.
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def _A ( self ):
        '''Return the kwargs dict used to construct the image processor under test.'''
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class lowerCamelCase_ (snake_case__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase: Dict = VivitImageProcessor if is_vision_available() else None
def _A ( self : int ):
_UpperCAmelCase : Tuple = VivitImageProcessingTester(self )
@property
def _A ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , "image_mean" ) )
self.assertTrue(hasattr(A , "image_std" ) )
self.assertTrue(hasattr(A , "do_normalize" ) )
self.assertTrue(hasattr(A , "do_resize" ) )
self.assertTrue(hasattr(A , "do_center_crop" ) )
self.assertTrue(hasattr(A , "size" ) )
def _A ( self : List[Any] ):
_UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
_UpperCAmelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def _A ( self : Tuple ):
# Initialize image_processing
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
_UpperCAmelCase : Any = prepare_video_inputs(self.image_processor_tester , equal_resolution=A )
for video in video_inputs:
self.assertIsInstance(A , A )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
_UpperCAmelCase : str = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase : List[Any] = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self : List[Any] ):
# Initialize image_processing
_UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for video in video_inputs:
self.assertIsInstance(A , A )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
_UpperCAmelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase : Optional[int] = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def _A ( self : List[Any] ):
        """Exercise the video image processor on lists of torch-tensor videos.

        Mirrors the numpy variant above but with ``torchify``-style inputs; the
        expected ``pixel_values`` shape is
        (batch, num_frames, num_channels, crop_h, crop_w).

        NOTE(review): garbled like its sibling — values are bound to
        ``_UpperCAmelCase`` but read as ``video_inputs``/``encoded_videos``,
        and ``A`` is an undefined placeholder. Also note both test methods are
        named ``_A``, so this definition shadows the previous one in the class.
        """
        # Initialize image_processing
        _UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _UpperCAmelCase : Optional[int] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
        for video in video_inputs:
            self.assertIsInstance(A , A )
            self.assertIsInstance(video[0] , torch.Tensor )
        # Test not batched input
        _UpperCAmelCase : Optional[Any] = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        _UpperCAmelCase : List[Any] = image_processing(A , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 31 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( __A ):
    """Unconditional image-generation pipeline using the Karras et al. (2022)
    stochastic sampler (`KarrasVeScheduler`) with a UNet noise predictor.

    NOTE(review): this block is machine-garbled and will not run as written:
    - ``__init__``/``__call__`` declare several parameters all named
      ``_lowerCamelCase`` (duplicate argument names are a SyntaxError);
    - results are assigned to ``A_`` but read back under the upstream names
      (``img_size``, ``model``, ``sample``, ``step_output``, ``image`` …);
    - the two ``lowerCamelCase = 42`` class attributes were originally the
      annotated ``unet`` / ``scheduler`` fields.
    Restore against the upstream diffusers KarrasVe pipeline.
    """
    lowerCamelCase = 42
    lowerCamelCase = 42
    def __init__( self , _lowerCamelCase , _lowerCamelCase ) -> Dict:
        # Registers unet and scheduler so DiffusionPipeline save/load works.
        super().__init__()
        self.register_modules(unet=_lowerCamelCase , scheduler=_lowerCamelCase )
    @torch.no_grad()
    def __call__( self , _lowerCamelCase = 1 , _lowerCamelCase = 50 , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , **_lowerCamelCase , ) -> Union[Tuple, ImagePipelineOutput]:
        # Sampling loop: draw x_0 ~ N(0, sigma_0^2 I), then walk the Karras
        # sigma schedule with an Euler step plus optional 2nd-order correction.
        A_ : str = self.unet.config.sample_size
        A_ : Optional[int] = (batch_size, 3, img_size, img_size)
        A_ : Any = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        A_ : Dict = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase , device=self.device ) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(_lowerCamelCase )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            A_ : Optional[Any] = self.scheduler.schedule[t]
            A_ : Union[str, Any] = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            A_ , A_ : List[Any] = self.scheduler.add_noise_to_input(_lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            A_ : List[str] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            A_ : List[Any] = self.scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                A_ : int = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                A_ : Any = self.scheduler.step_correct(
                    _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , step_output.prev_sample , step_output["""derivative"""] , )
            A_ : Tuple = step_output.prev_sample
        # Map from [-1, 1] model space to [0, 1] image space, NCHW -> NHWC.
        A_ : Union[str, Any] = (sample / 2 + 0.5).clamp(0 , 1 )
        A_ : Union[str, Any] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            A_ : Dict = self.numpy_to_pil(_lowerCamelCase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_lowerCamelCase )
| 164 |
'''simple docstring'''
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( __A ):
    """SpeechT5-style feature extractor: pads/normalizes raw waveforms for the
    encoder and produces log-mel spectrograms for decoder targets.

    NOTE(review): this block is machine-garbled and will not run as written:
    - ``__init__``, the static normalizer and ``__call__``/``_process_audio``
      declare many parameters all named ``_lowerCamelCase`` (duplicate
      argument names are a SyntaxError);
    - configuration is assigned to the throwaway local ``A_`` instead of
      ``self.<attr>``, yet later code reads ``self.sample_size``,
      ``self.do_normalize``, ``self.window``, ``self.mel_filters`` etc.;
    - ``np.intaa``/``np.floataa`` are mangled ``np.int32``/``np.float32``
      spellings — TODO confirm against upstream.
    Restore against the upstream SpeechT5FeatureExtractor before use.
    """
    # Names the padding machinery emits for the model inputs.
    lowerCamelCase = ['''input_values''', '''attention_mask''']
    def __init__( self , _lowerCamelCase = 1 , _lowerCamelCase = 1_6000 , _lowerCamelCase = 0.0 , _lowerCamelCase = False , _lowerCamelCase = 80 , _lowerCamelCase = 16 , _lowerCamelCase = 64 , _lowerCamelCase = "hann_window" , _lowerCamelCase = 1.0 , _lowerCamelCase = 80 , _lowerCamelCase = 7600 , _lowerCamelCase = 1e-10 , _lowerCamelCase = 2 , _lowerCamelCase = True , **_lowerCamelCase , ) -> List[Any]:
        """Store STFT/mel configuration and precompute window and mel filters."""
        super().__init__(feature_size=_lowerCamelCase , sampling_rate=_lowerCamelCase , padding_value=_lowerCamelCase , **_lowerCamelCase )
        A_ : List[Any] = do_normalize
        A_ : Union[str, Any] = return_attention_mask
        A_ : Tuple = num_mel_bins
        A_ : List[str] = hop_length
        A_ : int = win_length
        A_ : Optional[int] = win_function
        A_ : List[Any] = frame_signal_scale
        A_ : str = fmin
        A_ : Optional[Any] = fmax
        A_ : Any = mel_floor
        A_ : Any = reduction_factor
        # Window/hop are given in milliseconds; convert to samples.
        A_ : Tuple = win_length * sampling_rate // 1000
        A_ : Dict = hop_length * sampling_rate // 1000
        A_ : Dict = optimal_fft_length(self.sample_size )
        A_ : str = (self.n_fft // 2) + 1
        A_ : int = window_function(window_length=self.sample_size , name=self.win_function , periodic=_lowerCamelCase )
        A_ : Optional[int] = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , )
        if frame_signal_scale != 1.0:
            warnings.warn(
                """The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers""" , _lowerCamelCase , )
        if reduction_factor != 2.0:
            warnings.warn(
                """The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers""" , _lowerCamelCase , )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def UpperCAmelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0.0 ) -> List[np.ndarray]:
        """Zero-mean/unit-variance normalize each vector over its unpadded
        length; padded tail positions are overwritten with ``padding_value``."""
        if attention_mask is not None:
            A_ : Dict = np.array(_lowerCamelCase , np.intaa )
            A_ : Dict = []
            for vector, length in zip(_lowerCamelCase , attention_mask.sum(-1 ) ):
                # 1e-7 guards against division by zero on constant inputs.
                A_ : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    A_ : Any = padding_value
                normed_input_values.append(_lowerCamelCase )
        else:
            A_ : List[str] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values
    def UpperCAmelCase_ ( self , _lowerCamelCase , ) -> np.ndarray:
        """Compute a log10-mel spectrogram for one waveform; returns (frames, mel)."""
        A_ : int = spectrogram(
            _lowerCamelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="""log10""" , )
        return log_mel_spec.T
    def __call__( self , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ) -> BatchFeature:
        """Featurize ``audio`` (encoder inputs) and/or ``audio_target``
        (decoder spectrogram targets) into a single BatchFeature."""
        if audio is None and audio_target is None:
            raise ValueError("""You must provide either `audio` or `audio_target` values.""" )
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    F" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    F" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        if audio is not None:
            A_ : Dict = self._process_audio(
                _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase , )
        else:
            A_ : Optional[int] = None
        if audio_target is not None:
            # Target features are merged into the inputs dict as decoder fields.
            A_ : Union[str, Any] = self._process_audio(
                _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase , )
            if inputs is None:
                return inputs_target
            else:
                A_ : Optional[int] = inputs_target["""input_values"""]
                A_ : Tuple = inputs_target.get("""attention_mask""" )
                if decoder_attention_mask is not None:
                    A_ : int = decoder_attention_mask
        return inputs
    def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ) -> BatchFeature:
        """Shared path for inputs and targets: coerce to float32 arrays, pad,
        and (for non-targets) optionally zero-mean/unit-variance normalize."""
        A_ : Optional[int] = isinstance(_lowerCamelCase , np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
        A_ : List[str] = is_batched_numpy or (
            isinstance(_lowerCamelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            A_ : Union[str, Any] = [np.asarray(_lowerCamelCase , dtype=np.floataa ) for speech in speech]
        elif not is_batched and not isinstance(_lowerCamelCase , np.ndarray ):
            A_ : List[str] = np.asarray(_lowerCamelCase , dtype=np.floataa )
        elif isinstance(_lowerCamelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
            A_ : Optional[Any] = speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            A_ : int = [speech]
        # needed to make pad() work on spectrogram inputs
        A_ : List[Any] = self.feature_size
        # convert into correct format for padding
        if is_target:
            A_ : Tuple = [self._extract_mel_features(_lowerCamelCase ) for waveform in speech]
            A_ : Tuple = BatchFeature({"""input_values""": features} )
            A_ : Dict = self.num_mel_bins
        else:
            A_ : Union[str, Any] = BatchFeature({"""input_values""": speech} )
        A_ : Tuple = self.pad(
            _lowerCamelCase , padding=_lowerCamelCase , max_length=_lowerCamelCase , truncation=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , **_lowerCamelCase , )
        A_ : Union[str, Any] = feature_size_hack
        # convert input values to correct format
        A_ : str = padded_inputs["""input_values"""]
        if not isinstance(input_values[0] , np.ndarray ):
            A_ : str = [np.asarray(_lowerCamelCase , dtype=np.floataa ) for array in input_values]
        elif (
            not isinstance(_lowerCamelCase , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.floataa )
        ):
            A_ : Tuple = [array.astype(np.floataa ) for array in input_values]
        elif isinstance(_lowerCamelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
            A_ : List[Any] = input_values.astype(np.floataa )
        # convert attention_mask to correct format
        A_ : Any = padded_inputs.get("""attention_mask""" )
        if attention_mask is not None:
            A_ : str = [np.asarray(_lowerCamelCase , dtype=np.intaa ) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            A_ : Any = (
                attention_mask
                if self._get_padding_strategies(_lowerCamelCase , max_length=_lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            A_ : Any = self.zero_mean_unit_var_norm(
                padded_inputs["""input_values"""] , attention_mask=_lowerCamelCase , padding_value=self.padding_value )
        if return_tensors is not None:
            A_ : Dict = padded_inputs.convert_to_tensors(_lowerCamelCase )
        return padded_inputs
    def UpperCAmelCase_ ( self ) -> Dict[str, Any]:
        """Serialize config, dropping attributes derived in __init__."""
        A_ : List[Any] = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        A_ : Optional[int] = ["""window""", """mel_filters""", """sample_size""", """sample_stride""", """n_fft""", """n_freqs"""]
        for name in names:
            if name in output:
                del output[name]
        return output
| 164 | 1 |
def lowerCAmelCase_ ( _snake_case : int = 1000000 ) -> int:
    """Return the sum of Euler's totient phi(k) for k = 2 .. ``_snake_case``.

    This equals the number of reduced proper fractions with denominator at
    most ``_snake_case`` (Project Euler problem 72).

    BUG FIXES vs. the previous revision:
    - the body read an undefined name ``limit`` (the parameter is ``_snake_case``);
    - the inner sieve stepped by the limit instead of by the prime ``i``,
      so only one multiple per prime was ever corrected;
    - the ``__main__`` guard called an undefined ``solution()``.
    """
    # phi[i] starts at i - 1; for a prime p that already equals phi(p), so
    # ``phi[i] == i - 1`` identifies primes as the sieve walks upward.
    phi = [i - 1 for i in range(_snake_case + 1 )]
    for i in range(2 , _snake_case + 1 ):
        if phi[i] == i - 1:  # i is prime
            # Apply the Euler-product factor (1 - 1/i) to every multiple of i.
            for j in range(2 * i , _snake_case + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : _snake_case + 1] )


if __name__ == "__main__":
    print(lowerCAmelCase_())
| 281 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
# Configure root logging once at import time: timestamped, leveled records
# in the format the transformers example scripts conventionally use.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Module-level logger (garbled name; downstream code reads ``logger``).
snake_case : Optional[int] = logging.getLogger(__name__)
def lowerCAmelCase_ ( out , labels ) -> int:
    """Return how many rows of ``out`` have their argmax equal to ``labels``.

    Args:
        out: 2-D array of per-class scores, shape (n_examples, n_classes).
        labels: 1-D array of gold class indices, shape (n_examples,).

    Returns:
        The count of correct predictions (a numpy integer).

    BUG FIXES vs. the previous revision: both parameters were named
    ``_snake_case`` (duplicate argument names are a SyntaxError) and the body
    read the undefined names ``outputs``/``labels``.
    """
    predictions = np.argmax(out , axis=1 )
    return np.sum(predictions == labels )
def lowerCAmelCase_ ( _snake_case : str ) -> list:
    """Load a ROCStories cloze CSV and return training tuples.

    Args:
        _snake_case: path to the CSV file; first row is a header and is skipped.

    Returns:
        A list of ``(story, continuation_1, continuation_2, label)`` tuples,
        where the label is shifted from 1/2 to 0/1.

    BUG FIXES vs. the previous revision: ``csv.reader`` was handed the *path
    string* instead of the open file object, and rows were appended to an
    undefined name ``output``.
    """
    with open(_snake_case , encoding="utf_8" ) as f:
        reader = csv.reader(f )
        output = []
        next(reader )  # skip the header line
        for line in tqdm(reader ):
            # Columns 1-4 are the story sentences; 5 and 6 the two endings.
            output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
        return output
def lowerCAmelCase_ ( encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ) -> list:
    """Pack tokenized ROCStories examples into fixed-size tensors.

    Each example becomes two candidate sequences
    ``[start] story[:cap] [delim] continuation[:cap] [clf]``, one per ending.

    Args:
        encoded_datasets: iterable of datasets, each an iterable of
            ``(story, cont1, cont2, mc_label)`` token-id lists.
        input_len: padded sequence length of the output tensors.
        cap_length: maximum tokens kept from the story and each continuation.
        start_token / delimiter_token / clf_token: special-token ids.

    Returns:
        One ``(input_ids, mc_token_ids, lm_labels, mc_labels)`` tensor tuple
        per dataset. ``lm_labels`` is -100 outside the real tokens so the LM
        loss ignores padding; ``mc_token_ids`` points at each [clf] position.

    BUG FIXES vs. the previous revision: all six parameters shared the name
    ``_snake_case`` (a SyntaxError), every per-example slice assignment had
    collapsed into a rebind of one local, and the final generator converted
    the wrong variable (``torch.tensor(_snake_case)`` instead of each array).
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        # -100 is the ignore_index of the LM loss.
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1 )] = with_cont1
            input_ids[i, 1, : len(with_cont2 )] = with_cont2
            # The multiple-choice head reads the hidden state at the [clf] token.
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, : len(with_cont1 )] = with_cont1
            lm_labels[i, 1, : len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
def lowerCAmelCase_ ( ) -> List[Any]:
    """Fine-tune OpenAIGPTDoubleHeadsModel on the ROCStories cloze task.

    Parses CLI args, loads/encodes the datasets, trains with AdamW plus a
    linear warmup schedule, saves the model, then evaluates multiple-choice
    accuracy and writes ``eval_results.txt``.

    NOTE(review): this block is machine-garbled and will not run as written —
    every result is bound to the throwaway local ``__magic_name__`` while the
    following statements read the upstream names (``parser``, ``args``,
    ``model``, ``tokenizer``, ``optimizer`` …), and ``_snake_case`` stands in
    for many different values (``str``/``int``/``float`` argparse types, the
    device, the batch tensors). Restore against the upstream
    ``run_openai_gpt.py`` example before relying on it.
    """
    __magic_name__ : Any = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=_snake_case , default="openai-gpt" , help="pretrained model name" )
    parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
    parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
    parser.add_argument(
        "--output_dir" , default=_snake_case , type=_snake_case , required=_snake_case , help="The output directory where the model predictions and checkpoints will be written." , )
    parser.add_argument("--train_dataset" , type=_snake_case , default="" )
    parser.add_argument("--eval_dataset" , type=_snake_case , default="" )
    parser.add_argument("--seed" , type=_snake_case , default=42 )
    parser.add_argument("--num_train_epochs" , type=_snake_case , default=3 )
    parser.add_argument("--train_batch_size" , type=_snake_case , default=8 )
    parser.add_argument("--eval_batch_size" , type=_snake_case , default=16 )
    parser.add_argument("--adam_epsilon" , default=1E-8 , type=_snake_case , help="Epsilon for Adam optimizer." )
    parser.add_argument("--max_grad_norm" , type=_snake_case , default=1 )
    parser.add_argument(
        "--max_steps" , default=-1 , type=_snake_case , help=(
            "If > 0: set total number of training steps to perform. Override num_train_epochs."
        ) , )
    parser.add_argument(
        "--gradient_accumulation_steps" , type=_snake_case , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
    parser.add_argument("--learning_rate" , type=_snake_case , default=6.25E-5 )
    parser.add_argument("--warmup_steps" , default=0 , type=_snake_case , help="Linear warmup over warmup_steps." )
    parser.add_argument("--lr_schedule" , type=_snake_case , default="warmup_linear" )
    parser.add_argument("--weight_decay" , type=_snake_case , default=0.01 )
    parser.add_argument("--lm_coef" , type=_snake_case , default=0.9 )
    parser.add_argument("--n_valid" , type=_snake_case , default=374 )
    parser.add_argument("--server_ip" , type=_snake_case , default="" , help="Can be used for distant debugging." )
    parser.add_argument("--server_port" , type=_snake_case , default="" , help="Can be used for distant debugging." )
    __magic_name__ : List[Any] = parser.parse_args()
    print(_snake_case )
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_snake_case )
        ptvsd.wait_for_attach()
    # Seed every RNG source for reproducibility.
    random.seed(args.seed )
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    torch.cuda.manual_seed_all(args.seed )
    __magic_name__ : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    __magic_name__ : Optional[int] = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(_snake_case , _snake_case ) )
    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True." )
    if not os.path.exists(args.output_dir ):
        os.makedirs(args.output_dir )
    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    __magic_name__ : List[Any] = ["_start_", "_delimiter_", "_classify_"]
    __magic_name__ : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(_snake_case )
    __magic_name__ : Optional[Any] = tokenizer.convert_tokens_to_ids(_snake_case )
    __magic_name__ : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(_snake_case ) )
    model.to(_snake_case )
    # Load and encode the datasets
    def tokenize_and_encode(_snake_case : str ):
        # Recursively tokenize strings, pass ints through, map over containers.
        if isinstance(_snake_case , _snake_case ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_snake_case ) )
        elif isinstance(_snake_case , _snake_case ):
            return obj
        return [tokenize_and_encode(_snake_case ) for o in obj]
    logger.info("Encoding dataset..." )
    __magic_name__ : Optional[int] = load_rocstories_dataset(args.train_dataset )
    __magic_name__ : str = load_rocstories_dataset(args.eval_dataset )
    __magic_name__ : int = (train_dataset, eval_dataset)
    __magic_name__ : List[str] = tokenize_and_encode(_snake_case )
    # Compute the max input length for the Transformer
    __magic_name__ : Optional[Any] = model.config.n_positions // 2 - 2
    __magic_name__ : Optional[int] = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, conta, _ in dataset )
    __magic_name__ : List[str] = min(_snake_case , model.config.n_positions ) # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    __magic_name__ : List[Any] = pre_process_datasets(_snake_case , _snake_case , _snake_case , *_snake_case )
    __magic_name__ , __magic_name__ : Optional[int] = tensor_datasets[0], tensor_datasets[1]
    __magic_name__ : Tuple = TensorDataset(*_snake_case )
    __magic_name__ : Union[str, Any] = RandomSampler(_snake_case )
    __magic_name__ : Dict = DataLoader(_snake_case , sampler=_snake_case , batch_size=args.train_batch_size )
    __magic_name__ : Any = TensorDataset(*_snake_case )
    __magic_name__ : Optional[Any] = SequentialSampler(_snake_case )
    __magic_name__ : int = DataLoader(_snake_case , sampler=_snake_case , batch_size=args.eval_batch_size )
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            __magic_name__ : Tuple = args.max_steps
            __magic_name__ : List[str] = args.max_steps // (len(_snake_case ) // args.gradient_accumulation_steps) + 1
        else:
            __magic_name__ : List[str] = len(_snake_case ) // args.gradient_accumulation_steps * args.num_train_epochs
        # Bias and LayerNorm parameters are conventionally exempt from weight decay.
        __magic_name__ : str = list(model.named_parameters() )
        __magic_name__ : Dict = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        __magic_name__ : str = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
        ]
        __magic_name__ : str = AdamW(_snake_case , lr=args.learning_rate , eps=args.adam_epsilon )
        __magic_name__ : List[str] = get_linear_schedule_with_warmup(
            _snake_case , num_warmup_steps=args.warmup_steps , num_training_steps=_snake_case )
    if args.do_train:
        # Training loop: LM loss is down-weighted by --lm_coef and combined
        # with the multiple-choice head loss.
        __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
            __magic_name__ : List[str] = 0
            __magic_name__ : Tuple = 0
            __magic_name__ : Dict = tqdm(_snake_case , desc="Training" )
            for step, batch in enumerate(_snake_case ):
                __magic_name__ : Optional[Any] = tuple(t.to(_snake_case ) for t in batch )
                __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Dict = batch
                __magic_name__ : Optional[Any] = model(_snake_case , mc_token_ids=_snake_case , lm_labels=_snake_case , mc_labels=_snake_case )
                __magic_name__ : Optional[Any] = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                # Exponential moving average of the loss for the progress bar.
                __magic_name__ : List[str] = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                __magic_name__ : int = "Training loss: {:.2e} lr: {:.2e}".format(_snake_case , scheduler.get_lr()[0] )
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        __magic_name__ : Dict = model.module if hasattr(_snake_case , "module" ) else model # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        __magic_name__ : List[Any] = os.path.join(args.output_dir , _snake_case )
        __magic_name__ : Dict = os.path.join(args.output_dir , _snake_case )
        torch.save(model_to_save.state_dict() , _snake_case )
        model_to_save.config.to_json_file(_snake_case )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        __magic_name__ : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        __magic_name__ : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(_snake_case )
    if args.do_eval:
        # Evaluation: accumulate multiple-choice loss and accuracy over the dev set.
        model.eval()
        __magic_name__ , __magic_name__ : Any = 0, 0
        __magic_name__ , __magic_name__ : Union[str, Any] = 0, 0
        for batch in tqdm(_snake_case , desc="Evaluating" ):
            __magic_name__ : int = tuple(t.to(_snake_case ) for t in batch )
            __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = batch
            with torch.no_grad():
                __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Dict = model(
                    _snake_case , mc_token_ids=_snake_case , lm_labels=_snake_case , mc_labels=_snake_case )
            __magic_name__ : Tuple = mc_logits.detach().cpu().numpy()
            __magic_name__ : Any = mc_labels.to("cpu" ).numpy()
            __magic_name__ : str = accuracy(_snake_case , _snake_case )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        __magic_name__ : Tuple = eval_loss / nb_eval_steps
        __magic_name__ : List[Any] = eval_accuracy / nb_eval_examples
        __magic_name__ : int = tr_loss / nb_tr_steps if args.do_train else None
        __magic_name__ : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        __magic_name__ : int = os.path.join(args.output_dir , "eval_results.txt" )
        with open(_snake_case , "w" ) as writer:
            logger.info("***** Eval results *****" )
            for key in sorted(result.keys() ):
                logger.info(" %s = %s" , _snake_case , str(result[key] ) )
                writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
    # BUG FIX: the guard called an undefined ``main()``; the zero-argument
    # training driver defined above is named ``lowerCAmelCase_``.
    lowerCAmelCase_()
| 281 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> float:
'''simple docstring'''
snake_case_ = u
for i in range(1, __UpperCAmelCase ):
snake_case_ = temp * (u - i)
return temp
def __magic_name__ ( ) -> None:
    """Interactive Newton forward-difference interpolation driver.

    Reads the sample count, x/y values and a query point from stdin, builds
    the forward-difference table in-place in ``y``, and prints the
    interpolated value.

    NOTE(review): machine-garbled — every result is assigned to the
    throwaway local ``snake_case_`` while later lines read the upstream names
    (``n``, ``y``, ``x``, ``value``, ``u``, ``summ``), ``__UpperCAmelCase``
    stands in for several different values, and the helper is invoked as
    ``ucal`` although the definition above was renamed to ``__magic_name__``
    (which this function now shadows). Restore against the upstream
    TheAlgorithms Newton-forward-interpolation script before use.
    """
    snake_case_ = int(input('''enter the numbers of values: ''' ) )
    snake_case_ = []
    for _ in range(__UpperCAmelCase ):
        y.append([] )
    for i in range(__UpperCAmelCase ):
        for j in range(__UpperCAmelCase ):
            y[i].append(__UpperCAmelCase )
    snake_case_ = 0
    print('''enter the values of parameters in a list: ''' )
    snake_case_ = list(map(__UpperCAmelCase , input().split() ) )
    print('''enter the values of corresponding parameters: ''' )
    for i in range(__UpperCAmelCase ):
        snake_case_ = float(input() )
    snake_case_ = int(input('''enter the value to interpolate: ''' ) )
    # u is the normalized offset of the query point from x[0].
    snake_case_ = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , __UpperCAmelCase ):
        for j in range(n - i ):
            snake_case_ = y[j + 1][i - 1] - y[j][i - 1]
    snake_case_ = y[0][0]
    for i in range(1 , __UpperCAmelCase ):
        summ += (ucal(__UpperCAmelCase , __UpperCAmelCase ) * y[0][i]) / math.factorial(__UpperCAmelCase )
    print(F"the value at {value} is {summ}" )
if __name__ == "__main__":
    # BUG FIX: the guard called an undefined ``main()``; the interactive
    # driver defined above is named ``__magic_name__``.
    __magic_name__()
| 72 |
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
# BUG FIX: every constant below was assigned to the throwaway name ``a``
# while the rest of the script reads the real names (PATH_TO_TRANSFORMERS,
# spec, transformers, CONFIG_MAPPING, _re_checkpoint, the ignore set),
# which made the module fail with NameError at import time.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
# NOTE(review): ``load_module`` is deprecated in favor of exec_module; kept
# here to preserve the script's existing behavior.
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
# BUG FIX: the pattern is now a raw string; '\[' in a normal string literal
# is an invalid escape sequence (a SyntaxWarning/SyntaxError on newer Pythons).
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes exempt from the docstring-checkpoint check.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def __magic_name__ ( ) -> None:
    """Verify every config class docstring links a matching HF Hub checkpoint.

    For each class in ``CONFIG_MAPPING``, scan its source for markdown links
    ``[name](https://huggingface.co/name)`` and require at least one whose
    link matches its checkpoint name; classes in the ignore set are skipped.

    Raises:
        ValueError: listing every offending config class name.

    BUG FIXES vs. the previous revision: the function read a nonexistent
    parameter ``__UpperCAmelCase``, bound every result to the single local
    ``snake_case_`` and then read the upstream names (``checkpoints``,
    ``ckpt_name``, ``ckpt_link_from_name``, ``name``, ``message`` …), all of
    which raised NameError.
    """
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False
        # source code of `config_class`
        source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(source )
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = "\n".join(sorted(configs_without_checkpoint ) )
        raise ValueError(F"The following configurations don't contain any valid checkpoint:\n{message}" )
if __name__ == "__main__":
    # BUG FIX: the guard called the pre-rename name
    # ``check_config_docstrings_have_checkpoints()``; the checker defined
    # above is now named ``__magic_name__``.
    __magic_name__()
| 72 | 1 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
# BUG FIX: every constant below was assigned to the throwaway name ``A``
# while the functions in this script read the real names (REPLACE_PATTERNS,
# REPLACE_FILES, README_FILE), which raised NameError at call time.
PATH_TO_EXAMPLES = "examples/"
# Per-location (compiled regex, replacement-with-VERSION-placeholder) pairs
# used to rewrite the package version string.
REPLACE_PATTERNS = {
    "examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
    "doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
# Files holding the canonical version string.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def _lowerCamelCase ( fname , version , pattern ):
    """Rewrite the version string inside ``fname``.

    Args:
        fname: path of the file to rewrite in place.
        version: new version string substituted for the VERSION placeholder.
        pattern: key into the module-level ``REPLACE_PATTERNS`` table.

    BUG FIXES vs. the previous revision: all three parameters were named
    ``_UpperCamelCase`` (duplicate argument names are a SyntaxError), the
    (regex, replacement) pair was unpacked into a single repeatedly-rebound
    local, and the final write used undefined names.
    """
    with open(fname , "r" , encoding="utf-8" , newline="\n" ) as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION" , version )
    code = re_pattern.sub(replace , code )
    with open(fname , "w" , encoding="utf-8" , newline="\n" ) as f:
        f.write(code )
def _lowerCamelCase ( _UpperCamelCase ):
    """Walk an examples tree and rewrite the version pin in every ``*.py``
    file, skipping the unmaintained ``research_projects`` and ``legacy``
    folders.

    NOTE(review): machine-garbled — the join at the bottom passes the same
    placeholder for both the folder and the file name (upstream joins
    ``folder`` with ``fname`` and passes the version separately), and
    ``update_version_in_file`` no longer exists under that name because the
    obfuscation renamed every helper in this file to ``_lowerCamelCase``.
    Restore against the upstream ``utils/release.py`` before use.
    """
    for folder, directories, fnames in os.walk(_UpperCamelCase ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects" )
        if "legacy" in directories:
            directories.remove("legacy" )
        for fname in fnames:
            if fname.endswith(".py" ):
                update_version_in_file(os.path.join(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase , pattern="examples" )
def _lowerCamelCase ( version , patch=False ):
    """Update the version string in every tracked location.

    Args:
        version: new version string.
        patch: when True, skip the examples tree (patch releases don't bump
            the example pins).

    BUG FIX vs. the previous revision: both parameters were named
    ``_UpperCamelCase`` (duplicate argument names are a SyntaxError).

    NOTE(review): the helpers invoked below kept their upstream names but the
    obfuscation renamed every definition in this file to ``_lowerCamelCase``,
    so these calls will not resolve until the helper names are restored.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def _lowerCamelCase ( ):
    """Rewrite the model list in the main README so links point at the
    released docs (``/model_doc``) instead of the ``main`` docs
    (``/main/model_doc``).

    NOTE(review): machine-garbled — the prompts/lines/indices are bound to
    the single local ``__lowerCAmelCase`` while later statements read the
    upstream names (``_start_prompt``, ``_end_prompt``, ``lines``,
    ``start_index``, ``index``), and the file handle is opened on the
    undefined ``_UpperCamelCase`` instead of the module-level ``README_FILE``.
    Restore against the upstream ``utils/release.py`` before use.
    """
    __lowerCAmelCase = "🤗 Transformers currently provides the following architectures"
    __lowerCAmelCase = "1. Want to contribute a new model?"
    with open(_UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
        __lowerCAmelCase = f.readlines()
    # Find the start of the list.
    __lowerCAmelCase = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    __lowerCAmelCase = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith("1." ):
            __lowerCAmelCase = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
        index += 1
    with open(_UpperCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
        f.writelines(_UpperCamelCase )
def _lowerCamelCase ( ):
    """Read the current package version from the ``__init__.py`` listed in
    ``REPLACE_FILES`` and return it as a ``packaging.version`` object.

    BUG FIXES vs. the previous revision: the file contents were bound to a
    throwaway local while the regex search and the return statement read the
    undefined name ``_UpperCamelCase``.
    """
    with open(REPLACE_FILES["init"] , "r" ) as f:
        code = f.read()
    # Group 0 of the "init" pattern captures the bare version string.
    default_version = REPLACE_PATTERNS["init"][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def _lowerCamelCase ( _UpperCamelCase=False ):
    """Interactive pre-release step: derive the next version (patch/minor),
    confirm it with the user, write it everywhere, and clean the README.

    NOTE(review): machine-garbled — results are bound to ``__lowerCAmelCase``
    while subsequent lines read the upstream names (``patch``,
    ``default_version``, ``version``), and the helpers called here
    (``get_version``, ``global_version_update``,
    ``clean_main_ref_in_model_list``) were all renamed to ``_lowerCamelCase``
    by the obfuscation. Restore against upstream ``utils/release.py``.
    """
    __lowerCAmelCase = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
    if default_version.is_devrelease:
        __lowerCAmelCase = default_version.base_version
    elif patch:
        __lowerCAmelCase = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        __lowerCAmelCase = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    __lowerCAmelCase = input(f"Which version are you releasing? [{default_version}]" )
    if len(_UpperCamelCase ) == 0:
        __lowerCAmelCase = default_version
    print(f"Updating version to {version}." )
    global_version_update(_UpperCamelCase , patch=_UpperCamelCase )
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`." )
        clean_main_ref_in_model_list()
def _lowerCamelCase ( ):
    """Do all the necessary post-release steps: bump to the next ``.dev0``
    version, ask the user to confirm it, update every versioned file and clean
    the main README."""
    current_version = get_version()
    # Next development cycle: bump minor and append .dev0 (e.g. 4.31.0.dev0).
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]" )
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}." )
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`." )
    clean_main_ref_in_model_list()
# CLI entry point of the release helper script.
if __name__ == "__main__":
    A : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    A : Dict = parser.parse_args()
    # NOTE(review): the parser and the parsed args are both bound to `A` above,
    # but the code below reads `parser` and `args`, and it calls
    # `pre_release_work`/`post_release_work`, which are not defined under those
    # names in this file — confirm the intended bindings.
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 57 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
    """Tests for ``ChineseCLIPProcessor``: save/load round-trips and parity
    between the processor and its underlying tokenizer / image processor.

    All fixtures are written into a temporary directory so the tests need no
    network access and no pretrained checkpoints.
    """

    def setUp(self):
        """Write a toy BERT vocab file and an image-processor config to disk."""
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random PIL image.

        The random array is channel-first (3, 30, 400) uint8 and is moved to
        channel-last before being wrapped in a PIL Image.
        """
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        """Saving then reloading keeps tokenizer vocab and image-processor config."""
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        """kwargs passed to ``from_pretrained`` override the saved components."""
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        """Processor(images=...) matches calling the image processor directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        """Processor(text=...) matches calling the tokenizer directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        """Text + images produce the combined keys; empty call raises."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        """Processor.batch_decode simply delegates to the tokenizer."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        """Keys of a full processor call match ``processor.model_input_names``."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 132 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int = 1_00_00_00 ):
"""simple docstring"""
_snake_case : Dict = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , snake_case__ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
    # Default argument (1_000_000) reproduces Project Euler problem 72.
    print(UpperCAmelCase__())
"""simple docstring"""
from __future__ import annotations
from typing import Any
def A_ ( _lowercase ):
'''simple docstring'''
create_state_space_tree(_lowercase, [], 0 )
def A_ ( _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
if index == len(_lowercase ):
print(_lowercase )
return
create_state_space_tree(_lowercase, _lowercase, index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(_lowercase, _lowercase, index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
__a = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
| 66 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    """Builds a tiny ConvNextV2 config plus random inputs, and runs shape
    checks for the base model, the classification head and the backbone.

    Named ``ConvNextVaModelTester`` because the test class instantiates it
    under that name in ``setUp``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        # Everything must live on self: prepare_config_and_inputs() and the
        # test class read these attributes back.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random inputs."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values, 'labels': labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for ConvNextV2.

    Attribute and method names are restored to the ones the mixins and
    unittest discovery expect (``all_model_classes``, ``setUp``, ``test_*``).
    """

    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Nothing model-specific to check beyond what ConfigTester covers.
        return

    @unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            # Base models and backbones have no loss; skip them for training.
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def lowerCAmelCase_ ( ):
    """Load and return the standard COCO cats fixture image used by the
    integration test below."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released
    ``facebook/convnextv2-tiny-1k-224`` checkpoint."""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(torch_device)

        preprocessor = self.default_image_processor
        # lowerCAmelCase_ is this file's prepare_img helper.
        image = lowerCAmelCase_()
        inputs = preprocessor(images=image, return_tensors='pt' ).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class a ( PerceiverImageProcessor ):
    """Deprecated alias of :class:`PerceiverImageProcessor`.

    Kept only for backward compatibility; emits a ``FutureWarning`` on
    construction and otherwise behaves exactly like the image processor.
    """

    def __init__( self , *args , **kwargs ):
        '''Warn about the deprecation, then defer to PerceiverImageProcessor.'''
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A real-valued vector supporting the usual linear-algebra operations.

    Named ``Vector`` because the module-level helpers below construct it under
    that name. Supports ``len()``, ``str()``, ``+``, ``-``, scalar and
    dot-product ``*``, component access/mutation, Euclidean length and the
    angle to another vector.
    """

    def __init__(self, components=None):
        # Default to the empty vector; copy the input so later mutation of the
        # caller's list cannot change our state.
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self):
        """Return the number of components."""
        return len(self.__components)

    def __str__(self):
        """Render as "(c1,c2,...)"."""
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other):
        """Component-wise sum; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other):
        """Component-wise difference; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    def __mul__(self, other):
        """Scalar multiple for a number operand, dot product for a Vector."""
        if isinstance(other, (float, int)):
            scaled = [c * other for c in self.__components]
            return Vector(scaled)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            products = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(products)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self):
        """Return an independent copy of this vector."""
        return Vector(self.__components)

    def component(self, i):
        """Return component ``i`` (negative indices allowed, list-style)."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos, value):
        """Set component ``pos`` to ``value``."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self):
        """Return sqrt(sum(c ** 2)); raises on the empty vector."""
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other, deg=False):
        """Angle between self and other, in radians (degrees if ``deg``)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int ) -> Vector:
    """Return the zero vector of the given dimension.

    The original guard compared the argument against itself
    (``isinstance(x, x)``), which raises ``TypeError`` for any input; it must
    check against ``int``.
    """
    assert isinstance(lowerCAmelCase, int)
    return Vector([0] * lowerCAmelCase)
def __SCREAMING_SNAKE_CASE ( dimension: int , pos: int ) -> Vector:
    """Return the unit basis vector of size ``dimension`` with a 1 at ``pos``.

    Parameters are given distinct names — the original reused one name for
    both, which is a SyntaxError.
    """
    assert isinstance(dimension, int) and isinstance(pos, int)
    components = [0] * dimension
    components[pos] = 1
    return Vector(components)
def __SCREAMING_SNAKE_CASE ( scalar: float , x: "Vector" , y: "Vector" ) -> Vector:
    """Compute ``scalar * x + y`` (the BLAS "axpy" operation).

    Parameters are given distinct names — the original reused one name for all
    three, which is a SyntaxError.
    """
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def __SCREAMING_SNAKE_CASE ( n: int , a: int , b: int ) -> Vector:
    """Return a vector of ``n`` random integers drawn from ``[a, b]``.

    Seeds the RNG with ``n`` so results are reproducible for a given size.
    Parameters are given distinct names — the original reused one name for all
    three, which is a SyntaxError.
    """
    random.seed(n)
    components = [random.randint(a, b) for _ in range(n)]
    return Vector(components)
class Matrix:
    """A ``w`` x ``h`` real matrix stored as a list of row lists.

    Named ``Matrix`` because the module-level helpers below construct it under
    that name. Supports ``str()``, ``+``, ``-``, matrix-scalar and
    matrix-vector ``*``, component access/mutation, minors, cofactors and the
    determinant (via Laplace expansion along the first row).
    """

    def __init__(self, matrix, w, h):
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self):
        """Render each row as "|a,b,...|" followed by a newline."""
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other):
        """Component-wise sum; both matrices must have the same dimensions."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other):
        """Component-wise difference; both matrices must match in size."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    def __mul__(self, other):
        """Matrix-scalar product for a number operand, matrix-vector product
        for a Vector operand (the vector size must equal the width)."""
        # Check the scalar case first: it needs no other names in scope.
        if isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        elif isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = Vector([0] * self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!")
        return None

    def height(self):
        """Return the number of rows."""
        return self.__height

    def width(self):
        """Return the number of columns."""
        return self.__width

    def component(self, x, y):
        """Return entry at row ``x``, column ``y``."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x, y, value):
        """Set entry at row ``x``, column ``y`` to ``value``."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x, y):
        """Determinant of the submatrix with row ``x`` and column ``y`` removed."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x, y):
        """Signed minor: (-1)**(x + y) * minor(x, y)."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self):
        """Determinant via Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int ) -> Matrix:
    """Return the ``n`` x ``n`` zero matrix."""
    n = lowerCAmelCase
    rows: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(rows, n, n)
def __SCREAMING_SNAKE_CASE ( width: int , height: int , a: int , b: int ) -> Matrix:
    """Return a ``width`` x ``height`` matrix of random integers in ``[a, b]``.

    Seeds the RNG with ``width`` (matching the original's seeding on its first
    argument) so results are reproducible. Parameters are given distinct names
    — the original reused one name for all four, which is a SyntaxError.
    """
    random.seed(width)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A ( lowerCAmelCase ):
    """BLIP-style processor wrapping a BLIP image processor and a BERT tokenizer.

    Fix: the obfuscated original assigned all three class attributes to one
    repeated name (only the last survived), collapsed every ``__call__``
    parameter to a single duplicated name (a SyntaxError), and bound the
    tokenizer/image-processor results to throwaway names while later lines
    read ``text_encoding`` / ``encoding_image_processor``. Names are restored
    from the upstream ``BlipProcessor``.
    """

    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """BlipImageProcessor"""
    tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")

    def __init__( self , image_processor , tokenizer ):
        """Wire up the two sub-processors; BLIP never consumes token_type_ids."""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        # default to the image processor until a text-only call switches it
        self.current_processor = self.image_processor

    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs ):
        """Prepare text and/or images for the model.

        Returns the tokenizer encoding (text only), the image-processor
        encoding (images only), or the image encoding updated with the text
        encoding when both are provided.
        """
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor

    def batch_decode( self , *args , **kwargs ):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """De-duplicated union of both sub-processors' model input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 209 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A ( lowerCAmelCase ):
    """BLIP-style processor wrapping a BLIP image processor and a BERT tokenizer.

    Fix: the obfuscated original assigned all three class attributes to one
    repeated name (only the last survived), collapsed every ``__call__``
    parameter to a single duplicated name (a SyntaxError), and bound the
    tokenizer/image-processor results to throwaway names while later lines
    read ``text_encoding`` / ``encoding_image_processor``. Names are restored
    from the upstream ``BlipProcessor``.
    """

    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """BlipImageProcessor"""
    tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")

    def __init__( self , image_processor , tokenizer ):
        """Wire up the two sub-processors; BLIP never consumes token_type_ids."""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        # default to the image processor until a text-only call switches it
        self.current_processor = self.image_processor

    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs ):
        """Prepare text and/or images for the model.

        Returns the tokenizer encoding (text only), the image-processor
        encoding (images only), or the image encoding updated with the text
        encoding when both are provided.
        """
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor

    def batch_decode( self , *args , **kwargs ):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """De-duplicated union of both sub-processors' model input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 209 | 1 |
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''', ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''', ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''', [None, '''v2'''] )
def snake_case_ ( repo_id: str, path: str, revision: str ):
    """hf_hub_url must URL-quote the path and default the revision to 'main'.

    Fix: the obfuscated original collapsed all three parameters to ``A_``
    (a duplicate-argument SyntaxError) and bound the result to a throwaway
    name while asserting on the unbound ``url``. Parameter names are
    restored from the parametrize ids and the f-string below.
    """
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision )
    assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path )}'''
| 353 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case ( _lowercase):
    """Model-tester helper: builds tiny Flaubert configs/inputs and checks each head's output shapes.

    NOTE(review): obfuscation collapsed every ``__init__`` parameter to one
    repeated name (a duplicate-argument SyntaxError), stripped the ``self.``
    targets from the attribute assignments (later methods read
    ``self.batch_size`` etc.), and renamed all methods to the same
    identifier ``SCREAMING_SNAKE_CASE`` so only the last definition would
    survive. Code is preserved byte-for-byte here and needs upstream
    reconstruction before it can run.
    """

    def __init__( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any]=1_3 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : int=False , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Dict=9_9 , __lowerCAmelCase : str=0 , __lowerCAmelCase : Optional[Any]=3_2 , __lowerCAmelCase : Tuple=5 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[int]=5_1_2 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : str=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Optional[int]="last" , __lowerCAmelCase : str=None , __lowerCAmelCase : int=None , ):
        """Record the tester hyper-parameters (assignments below lost their ``self.`` targets)."""
        _lowerCamelCase : Dict = parent
        _lowerCamelCase : List[str] = batch_size
        _lowerCamelCase : Dict = seq_length
        _lowerCamelCase : List[Any] = is_training
        _lowerCamelCase : Dict = use_input_lengths
        _lowerCamelCase : Tuple = use_token_type_ids
        _lowerCamelCase : Any = use_labels
        _lowerCamelCase : Optional[Any] = gelu_activation
        _lowerCamelCase : Optional[Any] = sinusoidal_embeddings
        _lowerCamelCase : Dict = causal
        _lowerCamelCase : Dict = asm
        _lowerCamelCase : str = n_langs
        _lowerCamelCase : str = vocab_size
        _lowerCamelCase : Optional[int] = n_special
        _lowerCamelCase : Dict = hidden_size
        _lowerCamelCase : int = num_hidden_layers
        _lowerCamelCase : str = num_attention_heads
        _lowerCamelCase : Dict = hidden_dropout_prob
        _lowerCamelCase : int = attention_probs_dropout_prob
        _lowerCamelCase : Any = max_position_embeddings
        _lowerCamelCase : Any = type_vocab_size
        _lowerCamelCase : Optional[int] = type_sequence_label_size
        _lowerCamelCase : List[str] = initializer_range
        _lowerCamelCase : List[Any] = num_labels
        _lowerCamelCase : Dict = num_choices
        _lowerCamelCase : str = summary_type
        _lowerCamelCase : List[str] = use_proj
        _lowerCamelCase : int = scope

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Build random input ids, masks, lengths and labels plus a config tuple."""
        _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        _lowerCamelCase : Optional[int] = None
        if self.use_input_lengths:
            _lowerCamelCase : int = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        _lowerCamelCase : Union[str, Any] = None
        if self.use_token_type_ids:
            _lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        _lowerCamelCase : Union[str, Any] = None
        _lowerCamelCase : List[str] = None
        _lowerCamelCase : Optional[Any] = None
        if self.use_labels:
            _lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _lowerCamelCase : str = ids_tensor([self.batch_size] , 2 ).float()
            _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
        _lowerCamelCase : Tuple = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Return a small FlaubertConfig built from the tester's hyper-parameters."""
        return FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )

    def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , ):
        """Check the base FlaubertModel's last_hidden_state shape."""
        _lowerCamelCase : Optional[Any] = FlaubertModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , lengths=__lowerCAmelCase , langs=__lowerCAmelCase )
        _lowerCamelCase : str = model(__lowerCAmelCase , langs=__lowerCAmelCase )
        _lowerCamelCase : List[str] = model(__lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , ):
        """Check the LM head's loss and logits shapes."""
        _lowerCamelCase : Tuple = FlaubertWithLMHeadModel(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : str = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , ):
        """Check the simple QA head's start/end logits shapes."""
        _lowerCamelCase : Union[str, Any] = FlaubertForQuestionAnsweringSimple(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : List[str] = model(__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , ):
        """Check the beam-search QA head's loss and top-k logits shapes."""
        _lowerCamelCase : str = FlaubertForQuestionAnswering(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase )
        _lowerCamelCase : Optional[Any] = model(
            __lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , p_mask=__lowerCAmelCase , )
        _lowerCamelCase : List[str] = model(
            __lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , )
        ((_lowerCamelCase) , ) : str = result_with_labels.to_tuple()
        _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase )
        ((_lowerCamelCase) , ) : Union[str, Any] = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )

    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : str , ):
        """Check the sequence-classification head's loss and logits shapes."""
        _lowerCamelCase : Dict = FlaubertForSequenceClassification(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : str = model(__lowerCAmelCase )
        _lowerCamelCase : Tuple = model(__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , ):
        """Check the token-classification head's logits shape."""
        _lowerCamelCase : Any = self.num_labels
        _lowerCamelCase : List[str] = FlaubertForTokenClassification(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , ):
        """Check the multiple-choice head's logits shape."""
        _lowerCamelCase : List[str] = self.num_choices
        _lowerCamelCase : Any = FlaubertForMultipleChoice(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCamelCase : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCamelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCamelCase : int = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Repackage prepare_config_and_inputs() into the common (config, inputs_dict) form."""
        _lowerCamelCase : Any = self.prepare_config_and_inputs()
        (
            (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) ,
        ) : Optional[Any] = config_and_inputs
        _lowerCamelCase : int = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''lengths''': input_lengths,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
    """Common + pipeline test suite for the Flaubert model family.

    NOTE(review): obfuscation renamed both class attributes to ``snake_case__``
    (only the second assignment survives) and all methods to
    ``SCREAMING_SNAKE_CASE``; several method signatures repeat one parameter
    name (a SyntaxError), and assignments below lost their original targets.
    Preserved byte-for-byte; needs upstream reconstruction to run.
    """

    snake_case__ : List[str] = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    snake_case__ : List[Any] = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
        """Skip QA pipeline tests when a slow tokenizer would be used."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('''Fast''' )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int]=False ):
        """Add zero cls_index/p_mask labels for the beam-search QA head."""
        _lowerCamelCase : Dict = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                _lowerCamelCase : Dict = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
                _lowerCamelCase : Any = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
        return inputs_dict

    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Instantiate the model tester and the config tester."""
        _lowerCamelCase : int = FlaubertModelTester(self )
        _lowerCamelCase : str = ConfigTester(self , config_class=__lowerCAmelCase , emb_dim=3_7 )

    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self : int ):
        """Exercise the base model check."""
        _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Exercise the LM-head check."""
        _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Exercise the simple-QA check."""
        _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Exercise the beam-search QA check."""
        _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Exercise the sequence-classification check."""
        _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Exercise the token-classification check."""
        _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Exercise the multiple-choice check."""
        _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*__lowerCAmelCase )

    @slow
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Smoke-test loading a pretrained checkpoint."""
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Dict = FlaubertModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )

    @slow
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Trace each model with torch.jit, round-trip it through disk, and re-run it."""
        _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            _lowerCamelCase : Any = True
            _lowerCamelCase : int = model_class(config=__lowerCAmelCase )
            _lowerCamelCase : List[str] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : int = torch.jit.trace(
                __lowerCAmelCase , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''traced_model.pt''' ) )
                _lowerCamelCase : Union[str, Any] = torch.jit.load(os.path.join(__lowerCAmelCase , '''traced_model.pt''' ) , map_location=__lowerCAmelCase )
                loaded(inputs_dict['''input_ids'''].to(__lowerCAmelCase ) , inputs_dict['''attention_mask'''].to(__lowerCAmelCase ) )
@require_torch
class __snake_case ( unittest.TestCase):
    """Integration test: base FlaubertModel hidden states must match reference values.

    Fix: the obfuscated original bound the model, input ids, output and
    expected tensors to throwaway names and then read the unbound names
    ``model`` / ``output``; restore the intermediate names.
    """

    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Run a pretrained checkpoint on a fixed sentence and compare a logits slice."""
        model = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
        input_ids = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 1_1, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 175 | 0 |
"""simple docstring"""
from __future__ import annotations
class _snake_case :
def __init__( self : Tuple , UpperCAmelCase : int=None ):
__lowerCamelCase : Tuple = data
__lowerCamelCase : int = None
def __repr__( self : Tuple ):
__lowerCamelCase : int = []
__lowerCamelCase : Tuple = self
while temp:
string_rep.append(F"""{temp.data}""" )
__lowerCamelCase : List[Any] = temp.next
return "->".join(UpperCAmelCase )
def lowercase_ ( elements_list: list ) -> Union[str, Any]:
    """Build a linked list of ``_snake_case`` nodes from a non-empty list and return the head.

    Fix: the obfuscated original renamed the parameter while the body kept
    reading ``elements_list``, collapsed the head/current bindings to one
    throwaway name, and called the non-existent name ``Node`` — the node
    class in this file is ``_snake_case``.
    """
    if not elements_list:
        raise Exception("The Elements List is empty" )
    # keep a fixed reference to the head while `current` advances
    current = head = _snake_case(elements_list[0] )
    for i in range(1 , len(elements_list ) ):
        current.next = _snake_case(elements_list[i] )
        current = current.next
    return head
def lowercase_ ( head_node: Node ) -> None:
    """Recursively print the list starting at ``head_node`` in reverse order.

    Fix: the obfuscated original renamed the parameter while the body read
    ``head_node``, used an undefined placeholder as both isinstance
    arguments, and recursed via the non-existent name ``print_reverse`` —
    recursion must target this function's current name, and the node class
    in this file is ``_snake_case``.
    """
    if head_node is not None and isinstance(head_node , _snake_case ):
        lowercase_(head_node.next )
        print(head_node.data )
def lowercase_ ( ) -> Optional[int]:
    """Demo driver: run doctests, build a sample list, print it and its reverse."""
    from doctest import testmod

    testmod()
    # NOTE(review): `make_linked_list` and `print_reverse` do not exist under
    # those names in this file (the defs were all renamed to `lowercase_`),
    # and the result below is bound to a throwaway name while the later
    # calls read the unbound `_UpperCamelCase` — this raises NameError as
    # written and needs upstream reconstruction.
    __lowerCamelCase : Optional[int] = make_linked_list([14, 52, 14, 12, 43] )
    print("Linked List:" )
    print(_UpperCamelCase )
    print("Elements in Reverse:" )
    print_reverse(_UpperCamelCase )
if __name__ == "__main__":
main() | 135 | """simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
# Fix: all five module constants were assigned to the same obfuscated name,
# while later code in this file reads DOC_STRIDE, MAX_LENGTH, SEED,
# PROCESS_TRAIN and CATEGORY_MAPPING — restore the real names.
DOC_STRIDE = 2_048  # token overlap between consecutive strided windows
MAX_LENGTH = 4_096  # maximum input length per window
SEED = 42  # numpy seed used before subsampling null answers
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def lowerCAmelCase__ ( _UpperCamelCase : str ) -> Any:
"""simple docstring"""
def choose_first(_UpperCamelCase : Tuple , _UpperCamelCase : List[Any]=False ):
assert isinstance(_UpperCamelCase , _UpperCamelCase )
if len(_UpperCamelCase ) == 1:
snake_case = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
snake_case = {k: [a[k]] for k in a}
if len(a['start_token'] ) > 0:
break
return a
snake_case = {'id': example['id']}
snake_case = example['annotations']
snake_case = annotation['yes_no_answer']
if 0 in yes_no_answer or 1 in yes_no_answer:
snake_case = ['yes'] if 1 in yes_no_answer else ['no']
snake_case = snake_case = []
snake_case = snake_case = []
snake_case = ['<cls>']
else:
snake_case = ['short']
snake_case = choose_first(annotation['short_answers'] )
if len(out['start_token'] ) == 0:
# answer will be long if short is not available
snake_case = ['long']
snake_case = choose_first(annotation['long_answer'] , is_long_answer=_UpperCamelCase )
snake_case = []
answer.update(_UpperCamelCase )
# disregard some samples
if len(answer['start_token'] ) > 1 or answer["start_token"] == answer["end_token"]:
snake_case = True
else:
snake_case = False
snake_case = ['start_token', 'end_token', 'start_byte', 'end_byte', 'text']
if not all(isinstance(answer[k] , _UpperCamelCase ) for k in cols ):
raise ValueError('Issue in ID' , example['id'] )
return answer
def lowerCAmelCase__ ( example: dict , assertion: bool = False ) -> Union[str, Any]:
    """Extract the HTML-free context string and the adjusted answer span for one example.

    Fix: the obfuscated original collapsed both parameters to one name
    (a SyntaxError) and bound every intermediate to a throwaway name while
    later statements read ``answer`` / ``doc`` / ``context`` /
    ``start_token`` / ``end_token`` / ``assertion``; names restored from
    those references. The single-answer helper is called by its upstream
    name ``_get_single_answer`` (renamed by obfuscation in this file).
    """
    answer = _get_single_answer(example )
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example['document']['tokens']
        context = []
        for i in range(len(doc['token'] ) ):
            if not doc["is_html"][i]:
                context.append(doc['token'][i] )
        return {
            "context": " ".join(context ),
            "answer": {
                "start_token": -1_0_0,  # ignore index in cross-entropy
                "end_token": -1_0_0,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ['start_token', 'end_token']
    answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} )  # e.g. [10] == 10

    doc = example['document']['tokens']
    start_token = answer['start_token']
    end_token = answer['end_token']
    context = []
    for i in range(len(doc['token'] ) ):
        if not doc["is_html"][i]:
            context.append(doc['token'][i] )
        else:
            # shift the span left for every HTML token dropped before it
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = ' '.join(context[start_token:end_token] )

    # checking above code
    if assertion:
        is_html = doc['is_html'][answer['start_token'] : answer['end_token']]
        old = doc['token'][answer['start_token'] : answer['end_token']]
        old = ' '.join([old[i] for i in range(len(old ) ) if not is_html[i]] )
        if new != old:
            print('ID:' , example['id'] )
            print('New:' , new , end='\n' )
            print('Old:' , old , end='\n\n' )

    return {
        "context": " ".join(context ),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def lowerCAmelCase__ ( _UpperCamelCase : Dict , _UpperCamelCase : Any , _UpperCamelCase : Optional[int]=2_0_4_8 , _UpperCamelCase : Union[str, Any]=4_0_9_6 , _UpperCamelCase : Dict=True ) -> Optional[Any]:
    """Tokenize one example into overlapping max_length windows with per-window labels.

    NOTE(review): obfuscation collapsed all five parameters to one repeated
    name (a SyntaxError) and stripped the assignment targets throughout —
    later statements read `out`, `answer`, `input_ids`, `q_len`, `inputs`,
    `category`, `q_indices`, `doc_start_indices`, `splitted_context`,
    `start_token`, `end_token`, `answers_*`, `new`, `old` — so this block
    raises NameError as written and is preserved byte-for-byte pending
    upstream reconstruction. The intended params appear to be
    (example, tokenizer, doc_stride=2048, max_length=4096, assertion) —
    confirm against the call sites below.
    """
    snake_case = get_context_and_ans(_UpperCamelCase , assertion=_UpperCamelCase )
    snake_case = out['answer']
    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }
    snake_case = tokenizer(example['question']['text'] , out['context'] ).input_ids
    snake_case = input_ids.index(tokenizer.sep_token_id ) + 1
    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        snake_case = []
        snake_case = []
        snake_case = input_ids[:q_len]
        snake_case = range(_UpperCamelCase , len(_UpperCamelCase ) , max_length - doc_stride )
        for i in doc_start_indices:
            snake_case = i + max_length - q_len
            snake_case = input_ids[i:end_index]
            inputs.append(q_indices + slice )
            category.append(answer['category'][0] )
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-1_0_0] * len(_UpperCamelCase ),
                "end_token": [-1_0_0] * len(_UpperCamelCase ),
                "category": category,
            },
        }
    snake_case = out['context'].split()
    snake_case = splitted_context[answer['end_token']]
    snake_case = len(
        tokenizer(
            ' '.join(splitted_context[: answer['start_token']] ) , add_special_tokens=_UpperCamelCase , ).input_ids )
    snake_case = len(
        tokenizer(' '.join(splitted_context[: answer['end_token']] ) , add_special_tokens=_UpperCamelCase ).input_ids )
    answer["start_token"] += q_len
    answer["end_token"] += q_len
    # fixing end token
    snake_case = len(tokenizer(_UpperCamelCase , add_special_tokens=_UpperCamelCase ).input_ids )
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1
    snake_case = input_ids[answer['start_token'] : answer['end_token'] + 1]  # right & left are inclusive
    snake_case = answer['start_token']
    snake_case = answer['end_token']
    if assertion:
        snake_case = tokenizer.decode(_UpperCamelCase )
        if answer["span"] != new:
            print('ISSUE IN TOKENIZATION' )
            print('OLD:' , answer['span'] )
            print('NEW:' , _UpperCamelCase , end='\n\n' )
    if len(_UpperCamelCase ) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }
    snake_case = input_ids[:q_len]
    snake_case = range(_UpperCamelCase , len(_UpperCamelCase ) , max_length - doc_stride )
    snake_case = []
    snake_case = []
    snake_case = []
    snake_case = []  # null, yes, no, long, short
    for i in doc_start_indices:
        snake_case = i + max_length - q_len
        snake_case = input_ids[i:end_index]
        inputs.append(q_indices + slice )
        assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
        if start_token >= i and end_token <= end_index - 1:
            snake_case = start_token - i + q_len
            snake_case = end_token - i + q_len
            answers_category.append(answer['category'][0] )  # ["short"] -> "short"
        else:
            snake_case = -1_0_0
            snake_case = -1_0_0
            answers_category.append('null' )
            snake_case = inputs[-1][start_token : end_token + 1]
        answers_start_token.append(_UpperCamelCase )
        answers_end_token.append(_UpperCamelCase )
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print('ISSUE in strided for ID:' , example['id'] )
                print('New:' , tokenizer.decode(_UpperCamelCase ) )
                print('Old:' , tokenizer.decode(_UpperCamelCase ) , end='\n\n' )
        if slice[-1] == tokenizer.sep_token_id:
            break
    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def lowerCAmelCase__ ( example , tokenizer , doc_stride=2_0_4_8 , max_length=4_0_9_6 , assertion=False ) -> Union[str, Any]:
    """`datasets.map`-compatible wrapper around ``get_strided_contexts_and_ans``.

    Fixes the original signature, whose five parameters were all named
    ``_UpperCamelCase`` — duplicate argument names are a SyntaxError.  The
    keyword names (``tokenizer``, ``doc_stride``, ``max_length``,
    ``assertion``) match the ``fn_kwargs`` dict used by the ``data.map``
    call in this file's ``__main__`` section.

    NOTE(review): ``get_strided_contexts_and_ans`` is defined earlier in
    this file — confirm its keyword names match.
    """
    example = get_strided_contexts_and_ans(
        example , tokenizer , doc_stride=doc_stride , max_length=max_length , assertion=assertion , )
    return example
def lowerCAmelCase__ ( _UpperCamelCase : Tuple , _UpperCamelCase : List[str] ) -> Any:
    """Append processed QA examples to a JSON-lines file, filtering out
    unanswerable windows and down-sampling "null"-category windows.

    NOTE(review): decompiled code — both parameters share the name
    ``_UpperCamelCase`` (duplicate argument names are a SyntaxError), and
    ``labels`` is read below although the value is bound to ``snake_case``.
    Left byte-identical; cannot run as-is.
    """
    with jsonlines.open(_UpperCamelCase , 'a' ) as writer:
        for example in tqdm(_UpperCamelCase , total=len(_UpperCamelCase ) , desc='Saving samples ... ' ):
            snake_case = example['labels']
            for ids, start, end, cat in zip(
                example['input_ids'] , labels['start_token'] , labels['end_token'] , labels['category'] , ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of null-category samples (original comment said 50%)
                writer.write(
                    {
                        'input_ids': ids,
                        'start_token': start,
                        'end_token': end,
                        'category': CATEGORY_MAPPING[cat],
                    } )
if __name__ == "__main__":
    # Driver: tokenize Natural Questions with BigBird and dump strided
    # windows to a JSON-lines file.
    # NOTE(review): decompiled code — every result is bound to
    # ``SCREAMING_SNAKE_CASE__`` while later statements read ``data``,
    # ``tokenizer``, ``fn_kwargs`` and ``cache_file_name``, and the globals
    # ``PROCESS_TRAIN``, ``DOC_STRIDE``, ``MAX_LENGTH``, ``SEED`` are not
    # bound in this chunk.  Left byte-identical; cannot run as-is.
    from datasets import load_dataset
    from transformers import BigBirdTokenizer
    SCREAMING_SNAKE_CASE__ = load_dataset("natural_questions")
    SCREAMING_SNAKE_CASE__ = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    SCREAMING_SNAKE_CASE__ = data["train" if PROCESS_TRAIN == "true" else "validation"]
    SCREAMING_SNAKE_CASE__ = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    SCREAMING_SNAKE_CASE__ = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    SCREAMING_SNAKE_CASE__ = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)
    np.random.seed(SEED)
    SCREAMING_SNAKE_CASE__ = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
# ---- (concatenation residue removed: "| 150 | 0 |") ----
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
# Module-level logger used by the feature extractor below.
lowerCAmelCase__ : int =logging.get_logger(__name__)
class __lowercase (__SCREAMING_SNAKE_CASE ):
    """Audio feature extractor: converts mono waveforms into dB log-mel
    spectrograms and pads them into a fixed-size batch with an optional
    patch-level attention mask.

    NOTE(review): machine-decompiled code.  ``__init__`` and ``__call__``
    declare every parameter as ``lowerCAmelCase__`` (duplicate argument
    names are a SyntaxError) while the bodies read the original names, and
    many ``SCREAMING_SNAKE_CASE_`` locals shadow what were originally
    ``self.*`` / named-variable writes.  Left byte-identical — the block
    cannot run as-is.
    """
    # Output keys this extractor produces.
    _UpperCAmelCase = ["""audio_values""", """audio_mask"""]
    def __init__( self , lowerCAmelCase__=2_0_4_8 , lowerCAmelCase__=1 , lowerCAmelCase__=[1_6, 1_6] , lowerCAmelCase__=1_2_8 , lowerCAmelCase__=4_4_1_0_0 , lowerCAmelCase__=8_6 , lowerCAmelCase__=2_0_4_8 , lowerCAmelCase__=0.0 , **lowerCAmelCase__ , ):
        """Store spectrogram geometry and precompute the mel filter bank.

        Intended parameters, in order (per the names read below — TODO
        confirm): spectrogram_length, num_channels, patch_size,
        feature_size, sampling_rate, hop_length_to_sampling_rate, n_fft,
        padding_value.
        """
        super().__init__(
            feature_size=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , padding_value=lowerCAmelCase__ , **lowerCAmelCase__ , )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = spectrogram_length
        SCREAMING_SNAKE_CASE_ : int = num_channels
        SCREAMING_SNAKE_CASE_ : List[str] = patch_size
        SCREAMING_SNAKE_CASE_ : List[Any] = feature_size // self.patch_size[1]
        SCREAMING_SNAKE_CASE_ : Optional[int] = n_fft
        SCREAMING_SNAKE_CASE_ : List[str] = sampling_rate // hop_length_to_sampling_rate
        SCREAMING_SNAKE_CASE_ : Optional[Any] = sampling_rate
        SCREAMING_SNAKE_CASE_ : Dict = padding_value
        # Slaney-normalised mel filter bank, transposed for `spectrogram`.
        SCREAMING_SNAKE_CASE_ : List[str] = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase__ , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=lowerCAmelCase__ , norm='slaney' , mel_scale='slaney' , ).T
    def UpperCamelCase__ ( self , lowerCAmelCase__ ):
        """Compute a dB-scaled log-mel spectrogram, normalised into [-1, 1]."""
        SCREAMING_SNAKE_CASE_ : int = spectrogram(
            lowerCAmelCase__ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
        # NOTE(review): `log_spec` is read below but never bound under that
        # name — decompilation artifact.
        SCREAMING_SNAKE_CASE_ : int = log_spec[:, :-1]
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = log_spec - 20.0
        SCREAMING_SNAKE_CASE_ : List[str] = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = False , **lowerCAmelCase__ , ):
        """Batch raw speech into padded log-mel patches (+ optional mask)."""
        # Reject input sampled at a rate other than the extractor's own.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    F''' with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        # Normalise the accepted input shapes to a list of (time, 1) arrays.
        SCREAMING_SNAKE_CASE_ : Optional[Any] = isinstance(lowerCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        SCREAMING_SNAKE_CASE_ : Dict = is_batched_numpy or (
            isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            SCREAMING_SNAKE_CASE_ : Optional[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ):
            SCREAMING_SNAKE_CASE_ : Any = np.asarray(lowerCAmelCase__ , dtype=np.floataa )
        elif isinstance(lowerCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            SCREAMING_SNAKE_CASE_ : Any = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            SCREAMING_SNAKE_CASE_ : List[str] = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        SCREAMING_SNAKE_CASE_ : Tuple = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , lowerCAmelCase__ ):
            SCREAMING_SNAKE_CASE_ : Any = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            # 1s over real patches, 0s over padding.
            SCREAMING_SNAKE_CASE_ : str = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            SCREAMING_SNAKE_CASE_ : List[str] = np.array(lowerCAmelCase__ ).astype(np.floataa )
        # convert into correct format for padding
        SCREAMING_SNAKE_CASE_ : Optional[int] = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        SCREAMING_SNAKE_CASE_ : str = np.ones([len(lowerCAmelCase__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        SCREAMING_SNAKE_CASE_ : str = padded_audio_features * self.padding_value
        for i in range(len(lowerCAmelCase__ ) ):
            SCREAMING_SNAKE_CASE_ : Tuple = audio_features[i]
            SCREAMING_SNAKE_CASE_ : List[str] = feature
        # return as BatchFeature
        if return_attention_mask:
            SCREAMING_SNAKE_CASE_ : Optional[Any] = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            SCREAMING_SNAKE_CASE_ : List[str] = {'audio_values': padded_audio_features}
        SCREAMING_SNAKE_CASE_ : int = BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
        return encoded_inputs
# ---- (concatenation residue removed: "| 162 |") ----
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def a__ ( A__, A__, A__, A__, A__ ):
    """Convert an original mLUKE checkpoint into a HF ``LukeForMaskedLM``
    model plus an ``MLukeTokenizer``, then sanity-check the outputs.

    NOTE(review): machine-decompiled code.  All five parameters are named
    ``A__`` (duplicate argument names are a SyntaxError), and throughout the
    body values are bound to ``SCREAMING_SNAKE_CASE_`` while later lines
    read the original names (``metadata``, ``state_dict``, ``entity_vocab``,
    ``tokenizer``, ``model`` ...).  Left byte-identical; cannot run as-is.
    """
    # Load configuration defined in the metadata file
    with open(A__ ) as metadata_file:
        SCREAMING_SNAKE_CASE_ : List[str] = json.load(A__ )
    SCREAMING_SNAKE_CASE_ : Optional[int] = LukeConfig(use_entity_aware_attention=A__, **metadata['model_config'] )
    # Load in the weights from the checkpoint_path
    SCREAMING_SNAKE_CASE_ : Optional[int] = torch.load(A__, map_location='cpu' )['module']
    # Load the entity vocab file
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = load_original_entity_vocab(A__ )
    # add an entry for [MASK2]
    SCREAMING_SNAKE_CASE_ : str = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
    # Add special tokens to the token vocabulary for downstream tasks
    SCREAMING_SNAKE_CASE_ : Any = AddedToken('<ent>', lstrip=A__, rstrip=A__ )
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken('<ent2>', lstrip=A__, rstrip=A__ )
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
    tokenizer.save_pretrained(A__ )
    # Rewrite the saved tokenizer config to point at MLukeTokenizer.
    with open(os.path.join(A__, 'tokenizer_config.json' ), 'r' ) as f:
        SCREAMING_SNAKE_CASE_ : str = json.load(A__ )
    SCREAMING_SNAKE_CASE_ : List[Any] = 'MLukeTokenizer'
    with open(os.path.join(A__, 'tokenizer_config.json' ), 'w' ) as f:
        json.dump(A__, A__ )
    with open(os.path.join(A__, MLukeTokenizer.vocab_files_names['entity_vocab_file'] ), 'w' ) as f:
        json.dump(A__, A__ )
    SCREAMING_SNAKE_CASE_ : Optional[int] = MLukeTokenizer.from_pretrained(A__ )
    # Initialize the embeddings of the special tokens
    SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.convert_tokens_to_ids(['@'] )[0]
    SCREAMING_SNAKE_CASE_ : str = tokenizer.convert_tokens_to_ids(['#'] )[0]
    SCREAMING_SNAKE_CASE_ : str = state_dict['embeddings.word_embeddings.weight']
    SCREAMING_SNAKE_CASE_ : int = word_emb[ent_init_index].unsqueeze(0 )
    SCREAMING_SNAKE_CASE_ : List[str] = word_emb[enta_init_index].unsqueeze(0 )
    SCREAMING_SNAKE_CASE_ : List[str] = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        SCREAMING_SNAKE_CASE_ : Tuple = state_dict[bias_name]
        SCREAMING_SNAKE_CASE_ : Any = decoder_bias[ent_init_index].unsqueeze(0 )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = decoder_bias[enta_init_index].unsqueeze(0 )
        SCREAMING_SNAKE_CASE_ : List[Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            SCREAMING_SNAKE_CASE_ : Tuple = F'''encoder.layer.{layer_index}.attention.self.'''
            SCREAMING_SNAKE_CASE_ : Tuple = state_dict[prefix + matrix_name]
            SCREAMING_SNAKE_CASE_ : Dict = state_dict[prefix + matrix_name]
            SCREAMING_SNAKE_CASE_ : Optional[Any] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = state_dict['entity_embeddings.entity_embeddings.weight']
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
    SCREAMING_SNAKE_CASE_ : List[str] = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    SCREAMING_SNAKE_CASE_ : List[str] = state_dict['entity_predictions.bias']
    SCREAMING_SNAKE_CASE_ : str = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
    SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
    SCREAMING_SNAKE_CASE_ : Tuple = LukeForMaskedLM(config=A__ ).eval()
    # Drop the weights that the HF model ties/recomputes.
    state_dict.pop('entity_predictions.decoder.weight' )
    state_dict.pop('lm_head.decoder.weight' )
    state_dict.pop('lm_head.decoder.bias' )
    SCREAMING_SNAKE_CASE_ : str = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
            SCREAMING_SNAKE_CASE_ : str = state_dict[key]
        else:
            SCREAMING_SNAKE_CASE_ : Dict = state_dict[key]
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.load_state_dict(A__, strict=A__ )
    if set(A__ ) != {"luke.embeddings.position_ids"}:
        raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
    if set(A__ ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    SCREAMING_SNAKE_CASE_ : List[str] = MLukeTokenizer.from_pretrained(A__, task='entity_classification' )
    SCREAMING_SNAKE_CASE_ : Any = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
    SCREAMING_SNAKE_CASE_ : Dict = (0, 9)
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer(A__, entity_spans=[span], return_tensors='pt' )
    SCREAMING_SNAKE_CASE_ : List[Any] = model(**A__ )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        SCREAMING_SNAKE_CASE_ : List[str] = torch.Size((1, 3_3, 7_6_8) )
        SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], A__, atol=1E-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        SCREAMING_SNAKE_CASE_ : Dict = torch.Size((1, 1, 7_6_8) )
        SCREAMING_SNAKE_CASE_ : int = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            F''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], A__, atol=1E-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    SCREAMING_SNAKE_CASE_ : Optional[int] = MLukeTokenizer.from_pretrained(A__ )
    SCREAMING_SNAKE_CASE_ : Tuple = 'Tokyo is the capital of <mask>.'
    SCREAMING_SNAKE_CASE_ : Tuple = (2_4, 3_0)
    SCREAMING_SNAKE_CASE_ : List[str] = tokenizer(A__, entity_spans=[span], return_tensors='pt' )
    SCREAMING_SNAKE_CASE_ : Tuple = model(**A__ )
    SCREAMING_SNAKE_CASE_ : List[Any] = encoding['input_ids'][0].tolist()
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
    SCREAMING_SNAKE_CASE_ : Any = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(A__ )
    SCREAMING_SNAKE_CASE_ : int = outputs.entity_logits[0][0].argmax().item()
    SCREAMING_SNAKE_CASE_ : List[str] = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(A__ ) )
    model.save_pretrained(A__ )
def a__ ( A__ ):
    """Load the original mLUKE entity vocabulary (a JSON-lines file) into a
    flat ``{name: id}`` mapping.

    Special tokens map under their bare name (one entry per token); every
    other entity maps once per language as ``"{language}:{entity_name}"``.

    Fixes three defects in the original: each line was parsed with
    ``json.loads(A__)`` (the *path*, not the line), the mapping-assignment
    targets were lost to throwaway locals, and the file handle was never
    closed.
    """
    SPECIAL_TOKENS = ['[MASK]', '[PAD]', '[UNK]']
    with open(A__ ) as vocab_file:
        data = [json.loads(line ) for line in vocab_file]
    new_mapping = {}
    for entry in data:
        entity_id = entry['id']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                # Special tokens are language-independent: keep one entry.
                new_mapping[entity_name] = entity_id
                break
            new_mapping[F'''{language}:{entity_name}'''] = entity_id
    return new_mapping
if __name__ == "__main__":
    # CLI driver for the mLUKE checkpoint conversion.
    # NOTE(review): calls ``convert_luke_checkpoint``, but the conversion
    # function above is defined under the decompiled name ``a__`` — this
    # would raise NameError.  Left byte-identical.
    lowerCAmelCase__ : Any =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
    parser.add_argument(
        '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
    )
    parser.add_argument(
        '--entity_vocab_path',
        default=None,
        type=str,
        help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
    )
    parser.add_argument(
        '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
    )
    lowerCAmelCase__ : List[Any] =parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
# ---- (concatenation residue removed: "| 162 | 1 |") ----
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowercase_ (key, default=False):
    """Read a boolean flag from the environment variable *key*.

    Returns ``default`` unchanged when the variable is unset; otherwise
    returns 1/0 for truthy/falsy values — the same contract as the
    deprecated ``distutils.util.strtobool``, which this no longer depends
    on (``distutils`` was removed in Python 3.12).

    Fixes the original signature, which declared both parameters as ``A``
    (duplicate argument names are a SyntaxError) while the body read
    ``key``/``default``.

    Raises:
        ValueError: if the variable is set to an unrecognised value.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to 1 or 0 (strtobool-compatible).
        lowered = value.lower()
        if lowered in ("y", "yes", "t", "true", "on", "1"):
            _value = 1
        elif lowered in ("n", "no", "f", "false", "off", "0"):
            _value = 0
        else:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F'''If set, {key} must be yes or no.''' )
    return _value
# Global test-run flags and pytest skip markers, evaluated once at import.
# NOTE(review): decompiled code — ``parse_flag_from_env`` is not bound under
# that name in this file (the function above is ``lowercase_``), and the
# markers below are read elsewhere under their original names rather than
# ``a_``.  Left byte-identical.
a_ :List[Any] = parse_flag_from_env("RUN_SLOW", default=False)
a_ :List[Any] = parse_flag_from_env("RUN_REMOTE", default=False)
a_ :Optional[int] = parse_flag_from_env("RUN_LOCAL", default=True)
a_ :Union[str, Any] = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
a_ :str = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
a_ :List[str] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
a_ :Union[str, Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
a_ :Optional[int] = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)
# Beam
a_ :Dict = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
a_ :List[Any] = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
a_ :str = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def lowercase_ (test_case):
    """Skip *test_case* unless faiss is importable."""
    try:
        import faiss  # noqa
    except ImportError:
        # Fixed: the original bound the wrapped test to a throwaway local
        # and then returned the undefined name `test_case`.
        test_case = unittest.skip('test requires faiss' )(test_case)
    return test_case
def lowercase_ (test_case):
    """Skip *test_case* unless regex is importable."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip('test requires regex' )(test_case)
    return test_case
def lowercase_ (test_case):
    """Skip *test_case* unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('test requires elasticsearch' )(test_case)
    return test_case
def lowercase_ (test_case):
    """Skip *test_case* unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('test requires sqlalchemy' )(test_case)
    return test_case
def lowercase_ (test_case):
    """Skip *test_case* unless PyTorch is available (per `datasets.config`)."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('test requires PyTorch' )(test_case)
    return test_case
def lowercase_ (test_case):
    """Skip *test_case* unless TensorFlow is available."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('test requires TensorFlow' )(test_case)
    return test_case
def lowercase_ (test_case):
    """Skip *test_case* unless JAX is available."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('test requires JAX' )(test_case)
    return test_case
def lowercase_ (test_case):
    """Skip *test_case* unless Pillow is available."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('test requires Pillow' )(test_case)
    return test_case
def lowercase_ (test_case):
    """Skip *test_case* unless transformers is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers' )(test_case)
    else:
        return test_case
def lowercase_ (test_case):
    """Skip *test_case* unless tiktoken is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken' )(test_case)
    else:
        return test_case
def lowercase_ (test_case):
    """Skip *test_case* unless spacy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy' )(test_case)
    else:
        return test_case
def lowercase_ (model):
    """Decorator factory: skip the test unless spacy *and* the named
    spacy *model* are available.

    Fixed: the original's inner function shadowed the model-name parameter,
    so ``spacy.load`` was called on the test case itself.
    """
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401
            spacy.load(model)
        except ImportError:
            return unittest.skip('test requires spacy' )(test_case)
        except OSError:
            return unittest.skip('test requires spacy model \'{}\''.format(model ) )(test_case)
        else:
            return test_case
    return _require_spacy_model
def lowercase_ (test_case):
    """Skip *test_case* unless pyspark is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark' )(test_case)
    else:
        return test_case
def lowercase_ (test_case):
    """Skip *test_case* unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark' )(test_case)
    else:
        return test_case
def lowercase_ (test_case):
    """Skip *test_case* unless slow tests are enabled.

    NOTE(review): reads the module global ``_run_slow_tests``, which this
    decompiled file binds under a different name — TODO confirm upstream.
    Fixed here: the original wrapped the test into a throwaway local and
    returned the undefined name ``test_case``.
    """
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('test is slow' )(test_case)
    return test_case
def lowercase_ (test_case):
    """Skip *test_case* unless local tests are enabled."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('test is local' )(test_case)
    return test_case
def lowercase_ (test_case):
    """Skip *test_case* unless packaged-module tests are enabled."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('test is packaged' )(test_case)
    return test_case
def lowercase_ (test_case):
    """Skip *test_case* unless remote/network tests are enabled."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('test requires remote' )(test_case)
    return test_case
def lowercase_ (*A : Any ):
def decorate(cls : List[str] ):
for name, fn in cls.__dict__.items():
if callable(A ) and name.startswith('test' ):
for decorator in decorators:
snake_case__ : List[str] = decorator(A )
setattr(cls , A , A )
return cls
return decorate
class snake_case__ ( lowerCAmelCase_ ):
    """Raised when an offline-simulated request would hang indefinitely.

    NOTE(review): the base class ``lowerCAmelCase_`` is unresolved in this
    chunk — presumably an exception type in the original.  TODO confirm.
    """

    pass
class snake_case__ ( lowerCAmelCase_ ):
    """Offline-simulation modes (connection fails / times out /
    HF_DATASETS_OFFLINE=1).

    NOTE(review): base unresolved — presumably ``enum.Enum`` originally;
    the three members below all decompiled to the same name, so only the
    last assignment survives.  Left byte-identical.
    """
    _SCREAMING_SNAKE_CASE = 0
    _SCREAMING_SNAKE_CASE = 1
    _SCREAMING_SNAKE_CASE = 2
@contextmanager
def lowercase_ (A : Tuple=OfflineSimulationMode.CONNECTION_FAILS , A : Any=1e-16 ):
    """Context manager simulating an offline environment in one of three
    modes (connection error, timeout, or HF_DATASETS_OFFLINE).

    NOTE(review): decompiled code — duplicate parameter names ``A`` (a
    SyntaxError), and the locals ``online_request``, ``url``, ``timeout``
    and ``max_retry_error`` are read but bound to throwaway names.  Left
    byte-identical; cannot run as-is.
    """
    snake_case__ : str = requests.Session().request
    def timeout_request(A : Tuple , A : Tuple , A : Tuple , **A : List[str] ):
        # Change the url to an invalid url so that the connection hangs
        snake_case__ : Any = 'https://10.255.255.1'
        if kwargs.get('timeout' ) is None:
            raise RequestWouldHangIndefinitelyError(
                F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
        snake_case__ : List[Any] = timeout
        try:
            return online_request(A , A , **A )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            snake_case__ : Dict = url
            snake_case__ : Optional[Any] = e.args[0]
            snake_case__ : str = (max_retry_error.args[0].replace('10.255.255.1' , F'''OfflineMock[{url}]''' ),)
            snake_case__ : List[str] = (max_retry_error,)
            raise
    def raise_connection_error(A : Any , A : Dict , **A : Tuple ):
        raise requests.ConnectionError('Offline mode is enabled.' , request=A )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send' , A ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request' , A ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE' , A ):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
@contextmanager
def lowercase_ (*args , **kwargs ):
    """Run the with-block inside a fresh temporary working directory.

    Extra arguments are forwarded to ``tempfile.TemporaryDirectory``.

    Fixes the original, which declared duplicate ``*A``/``**A`` parameters
    (a SyntaxError), chdir'd to the wrong name, and never restored the
    caller's working directory.
    """
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            # Always restore the caller's working directory, even on error.
            os.chdir(original_working_dir )
@contextmanager
def lowercase_ ():
    """Assert that Arrow-allocated memory grows inside the with-block.

    NOTE(review): decompiled — the baseline is bound to ``snake_case__``
    but read as ``previous_allocated_memory``.  Left byte-identical.
    """
    import gc
    gc.collect()
    snake_case__ : Union[str, Any] = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowercase_ ():
    """Assert that Arrow-allocated memory does NOT grow inside the block.

    NOTE(review): same broken ``previous_allocated_memory`` binding as above.
    """
    import gc
    gc.collect()
    snake_case__ : Dict = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowercase_ (rng1 , rng2 ):
    """Return True iff two NumPy bit generators are in the same state.

    Each generator is deep-copied first so drawing the comparison sample
    does not advance the caller's RNGs.

    Fixes the original signature, which declared both parameters as ``A``
    (duplicate argument names are a SyntaxError).
    """
    return deepcopy(rng1 ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(rng2 ).integers(0 , 1_0_0 , 1_0 ).tolist()
def lowercase_ (A : Union[str, Any] ):
    """Wrap *func* so that HTTP 500/502 errors mark the test xfail instead
    of failing it.

    NOTE(review): decompiled — ``_wrapper`` declares ``A`` three times
    (duplicate argument names are a SyntaxError), and the third-party
    ``decorator`` module is imported locally.  Left byte-identical.
    """
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(A : List[str] , *A : Tuple , **A : Dict ):
        try:
            return func(*A , **A )
        except HTTPError as err:
            # Transient server-side errors become expected failures.
            if str(A ).startswith('500' ) or str(A ).startswith('502' ):
                pytest.xfail(str(A ) )
            raise err
    return decorator.decorator(_wrapper , A )
class snake_case__ :
    """Value object holding the outcome of a finished subprocess:
    its return code plus the captured stdout/stderr lines."""

    def __init__( self, returncode, stdout, stderr ):
        # Fixed: the original declared all three parameters as
        # ``_snake_case`` (duplicate argument names — a SyntaxError) while
        # the body read the intended names.
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def lowercase_ (A : Any , A : Optional[int] ):
    """Feed each line read from an async *stream* to *callback* until EOF.

    NOTE(review): decompiled — duplicate parameter names ``A`` (a
    SyntaxError); the body reads ``stream``/``callback``/``line`` which are
    never bound.  Left byte-identical.
    """
    while True:
        snake_case__ : List[str] = await stream.readline()
        if line:
            callback(A )
        else:
            break
async def lowercase_ (A : Any , A : Dict=None , A : str=None , A : Union[str, Any]=None , A : List[str]=False , A : Tuple=False ):
    """Launch *cmd* as an async subprocess, teeing stdout/stderr into lists
    while optionally echoing them, and return a ``_RunOutput``.

    NOTE(review): decompiled — duplicate parameter names and broken local
    bindings (``p``, ``out``, ``err``, ``line``, ``sink``, ``quiet``).
    Left byte-identical.
    """
    if echo:
        print('\nRunning: ' , ' '.join(A ) )
    snake_case__ : Optional[int] = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=A , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    snake_case__ : Optional[int] = []
    snake_case__ : Dict = []
    def tee(A : int , A : Tuple , A : Optional[Any] , A : List[str]="" ):
        snake_case__ : List[str] = line.decode('utf-8' ).rstrip()
        sink.append(A )
        if not quiet:
            print(A , A , file=A )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda A : tee(A , A , sys.stdout , label='stdout:' ) ),
            _read_stream(p.stderr , lambda A : tee(A , A , sys.stderr , label='stderr:' ) ),
        ] , timeout=A , )
    return _RunOutput(await p.wait() , A , A )
def lowercase_ (A : Dict , A : int=None , A : Any=None , A : Union[str, Any]=1_8_0 , A : int=False , A : int=True ):
    """Synchronous front-end: run the async subprocess helper to completion
    and raise ``RuntimeError`` on a non-zero exit or empty output.

    NOTE(review): decompiled — result bound to ``snake_case__`` but read as
    ``result``; ``cmd_str``/``stderr`` likewise unbound.  Left byte-identical.
    """
    snake_case__ : Dict = asyncio.get_event_loop()
    snake_case__ : List[Any] = loop.run_until_complete(
        _stream_subprocess(A , env=A , stdin=A , timeout=A , quiet=A , echo=A ) )
    snake_case__ : List[str] = ' '.join(A )
    if result.returncode > 0:
        snake_case__ : Optional[int] = '\n'.join(result.stderr )
        raise RuntimeError(
            F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            F'''The combined stderr from workers follows:\n{stderr}''' )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' )
    return result
def lowercase_ ():
    """Return this pytest-xdist worker's numeric id (0 when not under xdist).

    Reads ``PYTEST_XDIST_WORKER`` (e.g. ``"gw3"``) and strips the ``gw``
    prefix.  Fixed: the original bound its intermediates to ``snake_case__``
    and then read undefined names.
    """
    worker = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
    worker = re.sub(r'^gw' , '' , worker , 0 , re.M )
    return int(worker )
def lowercase_ ():
    """Return a port unique to this xdist worker: base 29500 + worker id.

    Fixed: the original bound both values to ``snake_case__`` and then read
    the undefined names ``port``/``uniq_delta``.

    NOTE(review): calls ``pytest_xdist_worker_id``, which this decompiled
    file defines under the name ``lowercase_`` — TODO confirm upstream.
    """
    port = 2_9_5_0_0
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
# ---- (concatenation residue removed: "| 277 |") ----
from collections import deque
from .hash_table import HashTable
class snake_case__ ( lowerCAmelCase_ ):
    """Hash table using separate chaining: each slot holds a deque of values.

    NOTE(review): the base class (``lowerCAmelCase_``, originally
    ``HashTable``) is defined elsewhere; it is assumed to provide
    ``self.values``, ``self._keys``, ``self.size_table``,
    ``self.charge_factor`` and ``_collision_resolution`` — TODO confirm.
    The three methods below all decompiled to the name ``lowercase_``
    (shadowing each other) with duplicate ``_snake_case`` parameters (a
    SyntaxError); the conventional names required for base-class dispatch
    are restored here.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        """Prepend *data* to the chain stored at *key*, creating it on first use."""
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        # keep the key index in sync with the bucket contents
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        """Average remaining per-slot capacity, scaled by the charge factor."""
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        """Reuse *key* unless its chain is full and no slot is free; only
        then fall back to the base-class probing."""
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(data ) == 0
        ):
            return key
        return super()._collision_resolution(key, data )
# ---- (concatenation residue removed: "| 277 | 1 |") ----
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the BioGPT package: maps each submodule to the
# public names it exports.  Fixed from the decompiled original, which bound
# both the dict and the torch-only list to `__A` (clobbering the first),
# read the never-bound name `_import_structure`, dropped the
# `sys.modules[__name__]` assignment, and carried trailing concatenation
# residue (`| 283 |`) on its last line.
_import_structure = {
    'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
    'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is present: also expose the modeling classes lazily.
    _import_structure['modeling_biogpt'] = [
        'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BioGptForCausalLM',
        'BioGptForTokenClassification',
        'BioGptForSequenceClassification',
        'BioGptModel',
        'BioGptPreTrainedModel',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys
    # At runtime, replace this module with a lazy proxy so the heavy
    # submodules are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _snake_case ( unittest.TestCase ):
    """Config/input builder for Flax RoFormer model tests.

    NOTE(review): machine-decompiled code.  ``__init__`` declares every
    parameter as ``_lowerCamelCase`` (duplicate argument names are a
    SyntaxError) while its body reads the intended names (``parent``,
    ``batch_size`` ...); the two ``snake_case__`` methods shadow each
    other and read locals (``config``, ``input_ids`` ...) that are bound to
    throwaway names.  Left byte-identical; cannot run as-is.
    """
    def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=4 , ):
        """Record all model hyper-parameters on the tester instance."""
        UpperCAmelCase__ : Union[str, Any] = parent
        UpperCAmelCase__ : str = batch_size
        UpperCAmelCase__ : Tuple = seq_length
        UpperCAmelCase__ : Any = is_training
        UpperCAmelCase__ : Tuple = use_attention_mask
        UpperCAmelCase__ : Optional[Any] = use_token_type_ids
        UpperCAmelCase__ : Optional[Any] = use_labels
        UpperCAmelCase__ : str = vocab_size
        UpperCAmelCase__ : Dict = hidden_size
        UpperCAmelCase__ : int = num_hidden_layers
        UpperCAmelCase__ : Optional[int] = num_attention_heads
        UpperCAmelCase__ : Optional[int] = intermediate_size
        UpperCAmelCase__ : Tuple = hidden_act
        UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob
        UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
        UpperCAmelCase__ : List[Any] = max_position_embeddings
        UpperCAmelCase__ : Optional[int] = type_vocab_size
        UpperCAmelCase__ : str = type_sequence_label_size
        UpperCAmelCase__ : Optional[int] = initializer_range
        UpperCAmelCase__ : str = num_choices
    def snake_case__ ( self):
        """Build a RoFormerConfig plus random input tensors (ids, optional
        attention mask, optional token-type ids)."""
        UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        UpperCAmelCase__ : Union[str, Any] = None
        if self.use_attention_mask:
            UpperCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length])
        UpperCAmelCase__ : Dict = None
        if self.use_token_type_ids:
            UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        UpperCAmelCase__ : Optional[Any] = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def snake_case__ ( self):
        """Repackage the prepared config and inputs as (config, inputs_dict).

        NOTE(review): shadows the method above — decompilation artifact.
        """
        UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = config_and_inputs
        UpperCAmelCase__ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_flax
class _snake_case ( a__ , unittest.TestCase ):
    """Common model tests for all Flax RoFormer heads.

    NOTE(review): mangled — the base ``a__`` was presumably
    ``FlaxModelTesterMixin``; ``FlaxRoFormerModelTester`` is not defined
    under that name in this file (the tester class above is ``_snake_case``);
    ``_lowerCamelCase`` in the slow test is an unbound name (likely the
    original ``from_pt=True`` / ``outputs`` variables).  Confirm against the
    upstream test file before running.
    """

    lowerCAmelCase :Optional[int] = True
    lowerCAmelCase :Any = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def snake_case__ ( self):
        UpperCAmelCase__ : List[str] = FlaxRoFormerModelTester(self)
    @slow
    def snake_case__ ( self):
        # Smoke test: each head loads from the hub and runs a 1x1 forward pass.
        for model_class_name in self.all_model_classes:
            UpperCAmelCase__ : List[str] = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_lowerCamelCase)
            UpperCAmelCase__ : Dict = model(np.ones((1, 1)))
            self.assertIsNotNone(_lowerCamelCase)
@require_flax
class _snake_case ( unittest.TestCase ):
    """Integration test pinning masked-LM logits of the pretrained checkpoint.

    NOTE(review): mangled — ``model``/``output``/``vocab_size`` are read but
    the corresponding assignments bind ``UpperCAmelCase__`` instead, and
    ``_lowerCamelCase`` is unbound.  The intent (compare a 3x3 logits slice
    against hard-coded values with atol=1e-4) is still visible.
    """

    @slow
    def snake_case__ ( self):
        UpperCAmelCase__ : Any = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""")
        UpperCAmelCase__ : int = jnp.array([[0, 1, 2, 3, 4, 5]])
        UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase)[0]
        UpperCAmelCase__ : Union[str, Any] = 5_0000
        UpperCAmelCase__ : Any = (1, 6, vocab_size)
        self.assertEqual(output.shape , _lowerCamelCase)
        UpperCAmelCase__ : Union[str, Any] = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1e-4))
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A ( ProcessorMixin ):
    """Processor wrapping a ChineseCLIP image processor and a BERT tokenizer.

    Fixes applied (the block was machine-mangled):
    - base class restored to ``ProcessorMixin`` (imported above, previously
      the unbound name ``UpperCamelCase_``);
    - class attributes restored to the names ``ProcessorMixin`` requires
      (``attributes``, ``image_processor_class``, ``tokenizer_class``);
    - duplicate/throwaway parameter and local names restored
      (``__init__`` previously had two parameters with the same name — a
      SyntaxError — and ``__call__`` never stored ``pixel_values`` into the
      returned encoding).
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ChineseCLIPImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """Build the processor; `feature_extractor` kwarg is a deprecated alias."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Tokenize `text` and/or preprocess `images`; at least one is required.

        Returns a BatchEncoding; when both are given, `pixel_values` is merged
        into the text encoding.
        """
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            # merge the image features into the text encoding
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """Union of tokenizer and image-processor input names, order-preserving."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
| 199 |
from collections.abc import Generator
from math import sin
def a_ ( SCREAMING_SNAKE_CASE__ : bytes ):
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE__ ) != 32:
raise ValueError('Input must be of length 32' )
_lowerCamelCase : Any =b''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def reformat_hex(i: int) -> bytes:
    """Return the little-endian hex representation of the low 32 bits of ``i``.

    Renamed from the mangled ``a_`` (the digest assembly below calls
    ``reformat_hex``); locals restored from throwaway names.

    Raises:
        ValueError: if ``i`` is negative.
    """
    if i < 0:
        raise ValueError('Input must be non-negative')
    # low 32 bits as 8 hex chars
    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode('utf-8')
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Convert ``message`` to an MD5-padded bit string (as ASCII '0'/'1' bytes).

    The message is expanded to one character per bit, a single '1' bit and
    '0' padding are appended until the length is congruent to 448 mod 512,
    then the original bit length is appended as a 64-bit little-endian value
    (two 32-bit halves, each byte-swapped), per RFC 1321.

    Renamed from the mangled ``a_``; also fixes the per-character loop, which
    formatted the whole message instead of ``char``, and restores the
    ``bit_string``/``start_len`` bindings.
    """
    bit_string = b''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    # original length in bits, captured before padding
    start_len = format(len(bit_string), '064b').encode('utf-8')
    # Pad bit_string to a multiple of 512 chars
    bit_string += b'1'
    while len(bit_string) % 512 != 448:
        bit_string += b'0'
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list, None, None]:
    """Yield each 512-bit block of ``bit_string`` as a list of 16 ints.

    Each 32-character word is byte-swapped to little endian before being
    parsed as a base-2 integer.  Renamed from the mangled ``a_`` (the MD5
    driver below calls ``get_block_words``); locals restored.

    Raises:
        ValueError: if the length is not a multiple of 512.
    """
    if len(bit_string) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512')
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    """Return the bitwise NOT of ``i`` within 32 bits.

    Renamed from the mangled ``a_`` (the compression loop calls ``not_aa``);
    locals restored from throwaway names.

    Raises:
        ValueError: if ``i`` is negative.
    """
    if i < 0:
        raise ValueError('Input must be non-negative')
    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    """Return ``a + b`` modulo 2**32 (32-bit wrap-around addition).

    Renamed from the mangled ``a_`` and given distinct parameter names (the
    mangled signature reused one name for both parameters — a SyntaxError).
    """
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    """Rotate the 32-bit value ``i`` left by ``shift`` bits.

    Renamed from the mangled ``a_`` and given distinct parameter names (the
    mangled signature reused one name for both parameters — a SyntaxError).

    Raises:
        ValueError: if ``i`` or ``shift`` is negative.
    """
    if i < 0:
        raise ValueError('Input must be non-negative')
    if shift < 0:
        raise ValueError('Shift must be non-negative')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of ``message`` as 32 hex characters (bytes).

    Implements the RFC 1321 compression function using the helpers above
    (``preprocess``, ``get_block_words``, ``not_aa``, ``sum_aa``,
    ``left_rotate_aa``, ``reformat_hex``).

    Fixes applied: the mangled version bound every intermediate to a single
    throwaway name, so ``f``/``g`` and the running state ``a0..d0`` /
    ``a..d`` were never actually defined; all bindings are restored here.
    """
    bit_string = preprocess(message)

    # K[i] = floor(2^32 * |sin(i + 1)|), per RFC 1321
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    # per-round left-rotation amounts
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_aa(a0, a)
        b0 = sum_aa(b0, b)
        c0 = sum_aa(c0, c)
        d0 = sum_aa(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 199 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
# Module-level logger for the HANS dataset helpers below.  Renamed from the
# mangled `__A`: the Dataset classes and the feature converter read `logger`.
logger = logging.getLogger(__name__)
# NOTE(review): mangled dataclass — presumably the original `InputExample`
# (fields guid, text_a, text_b, label, pairID).  All five fields share the
# name `lowerCAmelCase`, so only one survives, and `a__` (the `frozen=`
# argument, presumably True) is unbound.  Restore before use.
@dataclass(frozen=a__ )
class _snake_case :
    lowerCAmelCase :str
    lowerCAmelCase :str
    lowerCAmelCase :Optional[str] = None
    lowerCAmelCase :Optional[str] = None
    lowerCAmelCase :Optional[str] = None
# NOTE(review): mangled dataclass — presumably the original `InputFeatures`
# (fields input_ids, attention_mask, token_type_ids, label, pairID); the
# converter below constructs it via `InputFeatures(**inputs, label=..., pairID=...)`.
# Same damage as above: duplicate field names and unbound `a__`.
@dataclass(frozen=a__ )
class _snake_case :
    lowerCAmelCase :List[int]
    lowerCAmelCase :Optional[List[int]] = None
    lowerCAmelCase :Optional[List[int]] = None
    lowerCAmelCase :Optional[Union[int, float]] = None
    lowerCAmelCase :Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class _snake_case ( a__ ):
        """PyTorch Dataset over HANS examples with on-disk feature caching.

        NOTE(review): mangled — `a__` was presumably `Dataset`; `__init__`
        parameters are all `_lowerCamelCase` (duplicate args -> SyntaxError);
        assignments bind throwaway names while the RHS reads the intended
        ones (`task`, `tokenizer`, `data_dir`, `processor`, ...); and the
        line assigning `label_list[2], label_list[1]` to a single name was
        presumably the RoBERTa label swap
        `label_list[1], label_list[2] = label_list[2], label_list[1]`.
        """
        lowerCAmelCase :List[InputFeatures]
        def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase=False , _lowerCamelCase = False , ):
            UpperCAmelCase__ : Optional[Any] = hans_processors[task]()
            # cache file name encodes split, tokenizer class, max length and task
            UpperCAmelCase__ : List[str] = os.path.join(
                _lowerCamelCase , """cached_{}_{}_{}_{}""".format(
                    """dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(_lowerCamelCase) , _lowerCamelCase , ) , )
            UpperCAmelCase__ : List[Any] = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                UpperCAmelCase__ : str = label_list[2], label_list[1]
            UpperCAmelCase__ : Optional[Any] = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            UpperCAmelCase__ : Dict = cached_features_file + """.lock"""
            with FileLock(_lowerCamelCase):
                if os.path.exists(_lowerCamelCase) and not overwrite_cache:
                    logger.info(f'''Loading features from cached file {cached_features_file}''')
                    UpperCAmelCase__ : Union[str, Any] = torch.load(_lowerCamelCase)
                else:
                    logger.info(f'''Creating features from dataset file at {data_dir}''')
                    UpperCAmelCase__ : Dict = (
                        processor.get_dev_examples(_lowerCamelCase) if evaluate else processor.get_train_examples(_lowerCamelCase)
                    )
                    logger.info("""Training examples: %s""" , len(_lowerCamelCase))
                    UpperCAmelCase__ : Dict = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
                    logger.info("""Saving features into cached file %s""" , _lowerCamelCase)
                    torch.save(self.features , _lowerCamelCase)
        def __len__( self):
            return len(self.features)
        def __getitem__( self , _lowerCamelCase):
            return self.features[i]
        def snake_case__ ( self):
            # accessor for the (possibly swapped) label list
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class _snake_case :
        """TensorFlow counterpart of the HANS dataset: wraps the features in a
        `tf.data.Dataset` built from a generator.

        NOTE(review): mangled like the torch class above — duplicate
        `_lowerCamelCase` parameters, throwaway assignment targets
        (`processor`, `label_list`, `self.features`, `self.dataset` are read
        but never bound), and the RoBERTa label swap collapsed to a single
        assignment.  Restore identifiers before use.
        """
        lowerCAmelCase :List[InputFeatures]
        def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 128 , _lowerCamelCase=False , _lowerCamelCase = False , ):
            UpperCAmelCase__ : List[Any] = hans_processors[task]()
            UpperCAmelCase__ : List[str] = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                UpperCAmelCase__ : Optional[int] = label_list[2], label_list[1]
            UpperCAmelCase__ : str = label_list
            UpperCAmelCase__ : List[Any] = processor.get_dev_examples(_lowerCamelCase) if evaluate else processor.get_train_examples(_lowerCamelCase)
            UpperCAmelCase__ : Any = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
            def gen():
                # yields (inputs_dict, label) pairs for tf.data.Dataset.from_generator
                for ex_index, ex in tqdm.tqdm(enumerate(self.features) , desc="""convert examples to features"""):
                    if ex_index % 1_0000 == 0:
                        logger.info("""Writing example %d of %d""" % (ex_index, len(_lowerCamelCase)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )
            UpperCAmelCase__ : List[Any] = tf.data.Dataset.from_generator(
                _lowerCamelCase , (
                    {
                        """example_id""": tf.intaa,
                        """input_ids""": tf.intaa,
                        """attention_mask""": tf.intaa,
                        """token_type_ids""": tf.intaa,
                    },
                    tf.intaa,
                ) , (
                    {
                        """example_id""": tf.TensorShape([]),
                        """input_ids""": tf.TensorShape([None, None]),
                        """attention_mask""": tf.TensorShape([None, None]),
                        """token_type_ids""": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ) , )
        def snake_case__ ( self):
            # accessor for the built tf.data.Dataset
            return self.dataset
        def __len__( self):
            return len(self.features)
        def __getitem__( self , _lowerCamelCase):
            return self.features[i]
        def snake_case__ ( self):
            # accessor for the label list (shadows the dataset accessor above)
            return self.label_list
class _snake_case ( a__ ):
    """Reads the HANS tsv files into InputExample objects.

    NOTE(review): mangled — `a__` was presumably `DataProcessor`, the four
    methods are all named `snake_case__` (later ones shadow earlier ones;
    originals were get_train_examples / get_dev_examples / get_labels /
    _create_examples), and the per-line assignments bind throwaway names
    while `examples.append(...)` reads the intended ones
    (`guid`, `text_a`, `text_b`, `label`, `pairID`).
    """
    def snake_case__ ( self , _lowerCamelCase):
        return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , """heuristics_train_set.txt""")) , """train""")
    def snake_case__ ( self , _lowerCamelCase):
        return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , """heuristics_evaluation_set.txt""")) , """dev""")
    def snake_case__ ( self):
        return ["contradiction", "entailment", "neutral"]
    def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase):
        UpperCAmelCase__ : Dict = []
        for i, line in enumerate(_lowerCamelCase):
            if i == 0:
                # first row is the header
                continue
            UpperCAmelCase__ : Dict = """%s-%s""" % (set_type, line[0])
            UpperCAmelCase__ : Union[str, Any] = line[5]
            UpperCAmelCase__ : Any = line[6]
            UpperCAmelCase__ : Any = line[7][2:] if line[7].startswith("""ex""") else line[7]
            UpperCAmelCase__ : int = line[0]
            examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase))
        return examples
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
    """Tokenize HANS examples into InputFeatures (padded/truncated to max length).

    NOTE(review): mangled — presumably the original
    ``hans_convert_examples_to_features(examples, label_list, max_length,
    tokenizer)`` referenced by the Dataset classes above; the four parameters
    all share one name (SyntaxError) and the assignments bind throwaway
    names while ``features.append`` / ``label_map`` read the intended ones.
    """
    UpperCAmelCase__ : Tuple = {label: i for i, label in enumerate(UpperCamelCase__ )}
    UpperCAmelCase__ : Optional[Any] = []
    for ex_index, example in tqdm.tqdm(enumerate(UpperCamelCase__ ) , desc="""convert examples to features""" ):
        if ex_index % 1_0_0_0_0 == 0:
            logger.info("""Writing example %d""" % (ex_index) )
        UpperCAmelCase__ : List[Any] = tokenizer(
            example.text_a , example.text_b , add_special_tokens=UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" , truncation=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , )
        UpperCAmelCase__ : Optional[Any] = label_map[example.label] if example.label in label_map else 0
        UpperCAmelCase__ : List[Any] = int(example.pairID )
        features.append(InputFeatures(**UpperCamelCase__ , label=UpperCamelCase__ , pairID=UpperCamelCase__ ) )
    for i, example in enumerate(examples[:5] ):
        # log the first few converted examples for inspection
        logger.info("""*** Example ***""" )
        logger.info(f'''guid: {example}''' )
        logger.info(f'''features: {features[i]}''' )
    return features
# Number of labels for each HANS task.
hans_tasks_num_labels = {
    'hans': 3,
}

# Registry mapping task name -> processor class; the Dataset classes above
# look processors up here via `hans_processors[task]()`.  Renamed from the
# mangled `__A` (both dicts previously shared that name, so the first was lost).
hans_processors = {
    'hans': HansProcessor,
}
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
# Module-level tokenizer constants.  Renamed from the mangled `__A` (every
# constant previously shared that single name, so only the last survived):
# the tokenizer class below reads VOCAB_FILES_NAMES,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, PRETRAINED_VOCAB_FILES_MAP,
# FAIRSEQ_LANGUAGE_CODES and `logger`.
logger = logging.get_logger(__name__)

# SentencePiece word-boundary marker.
SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/mbart-large-50-one-to-many-mmt': (
            'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/mbart-large-50-one-to-many-mmt': 10_24,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI']
class _snake_case ( a__ ):
    """MBart-50 SentencePiece tokenizer with fairseq-aligned vocab and
    language-code special tokens.

    NOTE(review): this block is machine-mangled and will not run as-is:
    `a__` was presumably `PreTrainedTokenizer`; almost every method is named
    `snake_case__` so later definitions shadow earlier ones (originals were
    vocab_size / src_lang properties, get_vocab, _tokenize,
    _convert_token_to_id, _convert_id_to_token, convert_tokens_to_string,
    save_vocabulary, get_special_tokens_mask, build_inputs_with_special_tokens,
    _build_translation_inputs, prepare_seq2seq_batch,
    _switch_to_input/target_mode, set_src/tgt_lang_special_tokens); several
    signatures repeat `_lowerCamelCase` (SyntaxError); and many assignments
    bind throwaway names while later code reads the intended ones.
    Restore the original identifiers before use.
    """
    lowerCAmelCase :int = VOCAB_FILES_NAMES
    lowerCAmelCase :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase :str = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase :List[str] = ['''input_ids''', '''attention_mask''']
    lowerCAmelCase :List[int] = []
    lowerCAmelCase :List[int] = []
    def __init__( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase = None , **_lowerCamelCase , ):
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCAmelCase__ : List[str] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase) if isinstance(_lowerCamelCase , _lowerCamelCase) else mask_token
        UpperCAmelCase__ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
        # every fairseq language code becomes an additional special token
        UpperCAmelCase__ : Dict = kwargs.get("""additional_special_tokens""" , [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
        UpperCAmelCase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(_lowerCamelCase))
        UpperCAmelCase__ : int = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        UpperCAmelCase__ : Union[str, Any] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        UpperCAmelCase__ : List[str] = 1
        UpperCAmelCase__ : Optional[int] = len(self.sp_model)
        UpperCAmelCase__ : List[str] = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_lowerCamelCase)
        }
        UpperCAmelCase__ : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()}
        UpperCAmelCase__ : Optional[int] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        UpperCAmelCase__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        UpperCAmelCase__ : Dict = src_lang if src_lang is not None else """en_XX"""
        UpperCAmelCase__ : str = self.lang_code_to_id[self._src_lang]
        UpperCAmelCase__ : Tuple = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def snake_case__ ( self):
        # vocab size: spm pieces + language codes + fairseq offset + mask token
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
    @property
    def snake_case__ ( self):
        return self._src_lang
    @src_lang.setter
    def snake_case__ ( self , _lowerCamelCase):
        # changing the source language refreshes the prefix/suffix special tokens
        UpperCAmelCase__ : str = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__( self):
        # the SentencePiece processor is not picklable; drop it from the state
        UpperCAmelCase__ : int = self.__dict__.copy()
        UpperCAmelCase__ : Tuple = None
        return state
    def __setstate__( self , _lowerCamelCase):
        UpperCAmelCase__ : Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs"""):
            UpperCAmelCase__ : Optional[Any] = {}
        UpperCAmelCase__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def snake_case__ ( self):
        UpperCAmelCase__ : Dict = {self.convert_ids_to_tokens(_lowerCamelCase): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def snake_case__ ( self , _lowerCamelCase):
        return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase)
    def snake_case__ ( self , _lowerCamelCase):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        UpperCAmelCase__ : Dict = self.sp_model.PieceToId(_lowerCamelCase)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def snake_case__ ( self , _lowerCamelCase):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def snake_case__ ( self , _lowerCamelCase):
        # decode sub-token runs with sentencepiece; pass special tokens through
        UpperCAmelCase__ : Optional[Any] = []
        UpperCAmelCase__ : List[Any] = """"""
        UpperCAmelCase__ : Optional[int] = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(_lowerCamelCase) + token
                UpperCAmelCase__ : Tuple = True
                UpperCAmelCase__ : List[str] = []
            else:
                current_sub_tokens.append(_lowerCamelCase)
                UpperCAmelCase__ : Dict = False
        out_string += self.sp_model.decode(_lowerCamelCase)
        return out_string.strip()
    def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None):
        # save the sentencepiece model file (copy, or serialize if missing)
        if not os.path.isdir(_lowerCamelCase):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        UpperCAmelCase__ : Any = os.path.join(
            _lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(_lowerCamelCase) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , _lowerCamelCase)
        elif not os.path.isfile(self.vocab_file):
            with open(_lowerCamelCase , """wb""") as fi:
                UpperCAmelCase__ : List[str] = self.sp_model.serialized_model_proto()
                fi.write(_lowerCamelCase)
        return (out_vocab_file,)
    def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase)
        UpperCAmelCase__ : Tuple = [1] * len(self.prefix_tokens)
        UpperCAmelCase__ : Dict = [1] * len(self.suffix_tokens)
        if token_ids_a is None:
            return prefix_ones + ([0] * len(_lowerCamelCase)) + suffix_ones
        return prefix_ones + ([0] * len(_lowerCamelCase)) + ([0] * len(_lowerCamelCase)) + suffix_ones
    def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None):
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase):
        # helper used by `translate` pipelines: tokenize and attach forced BOS
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""")
        UpperCAmelCase__ : Any = src_lang
        UpperCAmelCase__ : Dict = self(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase)
        UpperCAmelCase__ : List[Any] = self.convert_tokens_to_ids(_lowerCamelCase)
        UpperCAmelCase__ : List[str] = tgt_lang_id
        return inputs
    def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = "en_XX" , _lowerCamelCase = None , _lowerCamelCase = "ro_RO" , **_lowerCamelCase , ):
        UpperCAmelCase__ : Tuple = src_lang
        UpperCAmelCase__ : Optional[Any] = tgt_lang
        return super().prepare_seqaseq_batch(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase)
    def snake_case__ ( self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def snake_case__ ( self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def snake_case__ ( self , _lowerCamelCase):
        # MBart-50 format: [src_lang_code] X [eos]
        UpperCAmelCase__ : Tuple = self.lang_code_to_id[src_lang]
        UpperCAmelCase__ : Optional[int] = [self.cur_lang_code_id]
        UpperCAmelCase__ : Optional[int] = [self.eos_token_id]
    def snake_case__ ( self , _lowerCamelCase):
        # MBart-50 format: [tgt_lang_code] X [eos]
        UpperCAmelCase__ : List[Any] = self.lang_code_to_id[tgt_lang]
        UpperCAmelCase__ : Dict = [self.cur_lang_code_id]
        UpperCAmelCase__ : Optional[int] = [self.eos_token_id]
'''simple docstring'''
# Parameters for the digit-sum sequence jump tables.  Renamed from the
# mangled `__A` (all three constants previously shared that one name):
# the functions below read `ks`, `base` and `memo`.
ks = range(2, 20 + 1)  # jump sizes, measured in number of digits
base = [10**k for k in range(ks[-1] + 1)]  # powers of ten up to 10**20
memo: dict = {}  # memo[digitsum(b)][c] -> cached (diff, dn, k) jumps
def next_term(a_i, k, i, n):
    """Advance the digit-sum sequence as far as possible with one memoized jump.

    Parameters:
        a_i: digit array of the current term, least-significant digit first
             (mutated in place)
        k:   maximum jump size, in digits
        i:   index of the current term
        n:   index of the target term

    Returns (diff, dn): the total amount added to the term and the number of
    sequence terms skipped.

    NOTE(review): reconstructed — the mangled original bound every
    intermediate to one throwaway name and repeated a single parameter name;
    the named locals below follow the reads already present in the code.
    """
    # split a_i as b * 10**k + c; ds_b is digitsum(b)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)
def compute(a_i, k, i, n):
    """Compute sequence terms one at a time until index ``n`` or a carry
    spills past digit ``k``.

    Parameters mirror :func:`next_term`.  Returns (diff, terms_computed).

    NOTE(review): reconstructed from the mangled original (throwaway
    assignment targets, duplicate parameter names); the structure and all
    right-hand sides are unchanged.
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        # propagate the addend through the low k digits
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            # carry escaped past digit k: stop and let the caller jump
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    """Add ``addend`` into the little-endian digit array ``digits`` in place,
    starting at digit index ``k``, appending new digits as needed.

    NOTE(review): reconstructed from the mangled original (throwaway
    assignment targets, duplicate parameter names); right-hand sides and
    control flow are unchanged.
    """
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    # any remaining carry grows the number
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n=10**15):
    """Return the ``n``-th term of the digit-sum sequence a(i+1) = a(i) + digitsum(a(i)).

    Renamed from the mangled ``_A`` — the ``__main__`` guard below calls
    ``solution()`` — with locals restored from throwaway names.
    """
    digits = [1]  # a(1) = 1, stored least-significant digit first
    i = 1
    dn = 0  # total terms skipped so far
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    # reassemble the digit array into an integer
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
    # Print the puzzle answer; `=` inside the f-string is the Python 3.8+
    # self-documenting debug format.
    print(F'''{solution() = }''')
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
# Student MarianMT checkpoint used by the finetuning test below.  Renamed
# from the mangled `__A`: the test reads `MARIAN_MODEL`.
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class A ( __UpperCAmelCase ):
    """Slow GPU end-to-end test: fine-tune a Marian en-ro student via finetune.py.

    NOTE(review): the base class name `__UpperCAmelCase` is not defined in this
    file — presumably a mangled reference to TestCasePlus (imported above);
    confirm.  Many locals below (`data_cached`, `bash_script`, `args`, `model`,
    `metrics`, `contents`, ...) are read but every assignment targets
    `lowercase__`, so the original variable names appear mangled as well.
    """

    def A__ ( self ) -> List[Any]:
        """Download/extract the 40k/0.5k/0.5k WMT en-ro subset and record its path."""
        super().setUp()
        lowercase__ = cached_path(
            """https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=lowerCamelCase__ , )
        # NOTE(review): reads `data_cached`, assigned above only as `lowercase__`.
        lowercase__ = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''

    @slow
    @require_torch_gpu
    def A__ ( self ) -> str:
        """Smoke test: the Marian student checkpoint can be loaded."""
        MarianMTModel.from_pretrained(lowerCamelCase__ )

    @slow
    @require_torch_gpu
    def A__ ( self ) -> List[Any]:
        """Run the finetune.py bash recipe for one epoch and sanity-check metrics and checkpoints."""
        # Substitutions applied to the shell recipe to shrink the run to one epoch.
        lowercase__ = {
            """$MAX_LEN""": 64,
            """$BS""": 64,
            """$GAS""": 1,
            """$ENRO_DIR""": self.data_dir,
            """facebook/mbart-large-cc25""": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            """--learning_rate=3e-5""": """--learning_rate 3e-4""",
            """--num_train_epochs 6""": """--num_train_epochs 1""",
        }
        # Clean up bash script
        lowercase__ = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip()
        lowercase__ = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
        for k, v in env_vars_to_replace.items():
            lowercase__ = bash_script.replace(lowerCamelCase__ , str(lowerCamelCase__ ) )
        lowercase__ = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        lowercase__ = F'''
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        '''.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        lowercase__ = ["""finetune.py"""] + bash_script.split() + args
        with patch.object(lowerCamelCase__ , """argv""" , lowerCamelCase__ ):
            lowercase__ = argparse.ArgumentParser()
            lowercase__ = pl.Trainer.add_argparse_args(lowerCamelCase__ )
            lowercase__ = SummarizationModule.add_model_specific_args(lowerCamelCase__ , os.getcwd() )
            lowercase__ = parser.parse_args()
            lowercase__ = main(lowerCamelCase__ )
        # Check metrics
        lowercase__ = load_json(model.metrics_save_path )
        lowercase__ = metrics["""val"""][0]
        lowercase__ = metrics["""val"""][-1]
        self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , lowerCamelCase__ )
        self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.01 )
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 )
        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 )
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 )
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 )
        # check lightning ckpt can be loaded and has a reasonable statedict
        lowercase__ = os.listdir(lowerCamelCase__ )
        lowercase__ = [x for x in contents if x.endswith(""".ckpt""" )][0]
        lowercase__ = os.path.join(args.output_dir , lowerCamelCase__ )
        lowercase__ = torch.load(lowerCamelCase__ , map_location="""cpu""" )
        lowercase__ = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            lowercase__ = {os.path.basename(lowerCamelCase__ ) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["""test"""] ) == 1
class A ( __UpperCAmelCase ):
    """Slow GPU end-to-end test: run distillation.py with a Marian student, no teacher.

    NOTE(review): redefines the class name `A` from the test above, shadowing it;
    the base class `__UpperCAmelCase` is undefined here and several locals
    (`data_dir`, `bash_script`, `args`, `model`, `metrics`, `contents`, ...) are
    read without a matching assignment — the original identifiers appear mangled.
    """

    @timeout_decorator.timeout(600 )
    @slow
    @require_torch_gpu
    def A__ ( self ) -> Dict:
        """Distill for 6 epochs on the bundled tiny en-ro data; check metrics and the saved ckpt."""
        lowercase__ = F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
        # Substitutions applied to the shell recipe for a small, deterministic run.
        lowercase__ = {
            """--fp16_opt_level=O1""": """""",
            """$MAX_LEN""": 128,
            """$BS""": 16,
            """$GAS""": 1,
            """$ENRO_DIR""": data_dir,
            """$m""": """sshleifer/student_marian_en_ro_6_1""",
            """val_check_interval=0.25""": """val_check_interval=1.0""",
        }
        # Clean up bash script
        lowercase__ = (
            (self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip()
        )
        lowercase__ = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
        lowercase__ = bash_script.replace("""--fp16 """ , """ """ )
        for k, v in env_vars_to_replace.items():
            lowercase__ = bash_script.replace(lowerCamelCase__ , str(lowerCamelCase__ ) )
        lowercase__ = self.get_auto_remove_tmp_dir()
        lowercase__ = bash_script.replace("""--fp16""" , """""" )
        lowercase__ = 6
        lowercase__ = (
            ["""distillation.py"""]
            + bash_script.split()
            + [
                F'''--output_dir={output_dir}''',
                """--gpus=1""",
                """--learning_rate=1e-3""",
                F'''--num_train_epochs={epochs}''',
                """--warmup_steps=10""",
                """--val_check_interval=1.0""",
                """--do_predict""",
            ]
        )
        with patch.object(lowerCamelCase__ , """argv""" , lowerCamelCase__ ):
            lowercase__ = argparse.ArgumentParser()
            lowercase__ = pl.Trainer.add_argparse_args(lowerCamelCase__ )
            lowercase__ = SummarizationDistiller.add_model_specific_args(lowerCamelCase__ , os.getcwd() )
            lowercase__ = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            lowercase__ = distill_main(lowerCamelCase__ )
        # Check metrics
        lowercase__ = load_json(model.metrics_save_path )
        lowercase__ = metrics["""val"""][0]
        lowercase__ = metrics["""val"""][-1]
        assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , lowerCamelCase__ )
        # check lightning ckpt can be loaded and has a reasonable statedict
        lowercase__ = os.listdir(lowerCamelCase__ )
        lowercase__ = [x for x in contents if x.endswith(""".ckpt""" )][0]
        lowercase__ = os.path.join(args.output_dir , lowerCamelCase__ )
        lowercase__ = torch.load(lowerCamelCase__ , map_location="""cpu""" )
        lowercase__ = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            lowercase__ = {os.path.basename(lowerCamelCase__ ) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["""test"""] ) == 1
| 164 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size constants.  Both bind the same mangled name, so only the second
# value (32) survives — presumably MAX_GPU_BATCH_SIZE / EVAL_BATCH_SIZE from
# the upstream accelerate example; TODO confirm.
UpperCAmelCase_ : Optional[Any] = 16
UpperCAmelCase_ : Tuple = 32
def UpperCamelCase ( accelerator: Accelerator , batch_size: int = 16 , model_name: str = "bert-base-cased" ):
    """Build GLUE/MRPC train and eval dataloaders tokenized for `model_name`.

    Returns (train_dataloader, eval_dataloader).  The original signature
    repeated one mangled parameter name three times (a SyntaxError); parameter
    roles restored from the body's reads.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def UpperCamelCase ( accelerator , model , eval_dataloader , metric ):
    """Evaluate `model` on `eval_dataloader`, gathering predictions across processes.

    Returns the MRPC accuracy as a float.  Duplicate-sample trimming on the
    last batch mirrors the upstream accelerate example.  (Original signature
    repeated a mangled parameter name — SyntaxError — roles restored.)
    """
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def UpperCamelCase ( config , args ):
    """Train BERT on GLUE/MRPC with per-epoch checkpointing, or verify a resumed checkpoint.

    When `args.resume_from_checkpoint` is set, reload the accelerator state,
    re-evaluate, and assert the stored json state matches — then return early.
    NOTE(review): the dataloader builder and the evaluation loop were both
    renamed to `UpperCamelCase` by the obfuscation, so the `get_dataloaders` /
    `evaluation_loop` call names below (kept as in the original) are undefined
    in this file as written.
    """
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
    # Instantiate optimizer: real AdamW unless DeepSpeed supplies its own.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler: real warmup schedule unless DeepSpeed supplies one.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        # Recover the epoch number from a checkpoint folder named ".../epoch_<N>".
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"""state_{starting_epoch-1}.json"""), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f"""epoch_{epoch}"""
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        # Keys below must match the assertions in the resume branch above.
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step  # key name reconstructed; not read back
        accelerator.print(f"""epoch {epoch}:""", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"""state_{epoch}.json"""), "w") as f:
                json.dump(state, f)
def UpperCamelCase ( ):
    """Parse the CLI arguments and launch the training function.

    NOTE(review): `training_function` is not defined under that name in this
    file (the trainer above was renamed to `UpperCamelCase` as well); the call
    name is kept as in the original.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    # The CLI entry point above is named `UpperCamelCase`; the original called
    # the undefined name `main`, which raised NameError.
    UpperCamelCase()
| 198 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def UpperCamelCase ( checkpoint_repo: str , pytorch_dump_folder_path: str ):
    """Convert a RoBERTa-PreLayerNorm hub checkpoint to the transformers format.

    Downloads `pytorch_model.bin` from `checkpoint_repo`, renames the
    `roberta.*` keys, drops unused LayerNorm weights, and saves the model and
    tokenizer to `pytorch_dump_folder_path`.  (Original signature repeated a
    mangled parameter name — SyntaxError — roles restored.)
    """
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # The converter above is defined as `UpperCamelCase`; the original called the
    # undefined name `convert_roberta_prelayernorm_checkpoint_to_pytorch`.
    UpperCamelCase(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 198 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the DeBERTa sub-package: maps submodule name to the
# public names it exports.  (The original bound every value to one mangled
# variable, leaving the `_import_structure` reference below undefined.)
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 72 |
"""simple docstring"""
# Adjacency list of the demo graph used by the `__main__` examples below
# (which read it as `demo_graph`; previously only the mangled name was bound).
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
lowerCAmelCase__ = demo_graph  # keep the original (mangled) binding for compatibility
def snake_case_ ( graph: dict, start, goal ):
    """Breadth-first search returning one shortest path from `start` to `goal`.

    Returns [start] when start == goal, and [] when no path exists.
    (Original signature repeated a mangled parameter name — SyntaxError —
    and every local was read without assignment; roles restored.)
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def snake_case_ ( graph: dict, start, target ):
    """Return the number of edges on a shortest path from `start` to `target`.

    Returns 0 when start == target, and -1 when either node is missing from
    the graph or `target` is unreachable.  (Original signature repeated a
    mangled parameter name — SyntaxError; roles restored.)
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
    # NOTE(review): `bfs_shortest_path`, `bfs_shortest_path_distance` and
    # `demo_graph` are not defined under those names in this file — both
    # functions above are named `snake_case_` (the second shadows the first)
    # and the graph literal is bound to a mangled name, so these demo calls
    # raise NameError as written.
    print(bfs_shortest_path(demo_graph, '''G''', '''D'''))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, '''G''', '''D'''))  # returns 4
| 72 | 1 |
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def UpperCamelCase ( _A = "" ):
    """Scrape the IMDb Top-250 chart and return a {title: rating} dict.

    `_A` optionally overrides the chart URL; an empty string falls back to the
    default Top-250 page.  (Obfuscated locals — `url`, `soup`, the two
    `find_all` results — restored from the body's reads.)
    """
    url = _A or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }
def UpperCamelCase ( _A = "IMDb_Top_250_Movies.csv" ):
    """Write the scraped movie ratings to a CSV file at path `_A`.

    NOTE(review): `get_imdb_top_aaa_movies` is not defined in this file —
    presumably a mangled reference to the scraper above (which shares this
    function's name, so it cannot be called directly here); kept as in the
    original.
    """
    movies = get_imdb_top_aaa_movies()
    # newline="" lets the csv module control line endings portably.
    with open(_A, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
    # The CSV writer above is defined as `UpperCamelCase`; the original called
    # the undefined name `write_movies`, which raised NameError.
    UpperCamelCase()
| 138 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy import structure for the TAPAS sub-package: maps submodule name to the
# public names it exports.  (The original bound every value to one mangled
# variable, leaving the `_import_structure` reference below undefined.)
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 138 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class __a :
    """Helper that builds tiny Falcon configs/inputs and checks model outputs.

    NOTE(review): this class is heavily mangled — `__init__` repeats the
    parameter name `_a` (a SyntaxError), and locals such as `result`,
    `outputs`, `past_key_values` and the unpacked config/inputs are read while
    every assignment targets `SCREAMING_SNAKE_CASE__`.  Identifier roles below
    are inferred from the reads; restore from the upstream Falcon test before
    running.
    """

    def __init__( self , _a , _a=3 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> List[Any]:
        """Store tester hyper-parameters (batch size, seq length, model dims, ...)."""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent
        SCREAMING_SNAKE_CASE__ : int = batch_size
        SCREAMING_SNAKE_CASE__ : Dict = seq_length
        SCREAMING_SNAKE_CASE__ : int = is_training
        SCREAMING_SNAKE_CASE__ : Optional[int] = use_input_mask
        SCREAMING_SNAKE_CASE__ : Optional[Any] = use_token_type_ids
        SCREAMING_SNAKE_CASE__ : Optional[Any] = use_labels
        SCREAMING_SNAKE_CASE__ : Tuple = vocab_size
        SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size
        SCREAMING_SNAKE_CASE__ : Optional[int] = num_hidden_layers
        SCREAMING_SNAKE_CASE__ : int = num_attention_heads
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
        SCREAMING_SNAKE_CASE__ : int = hidden_act
        SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
        SCREAMING_SNAKE_CASE__ : List[str] = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE__ : Dict = max_position_embeddings
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = type_vocab_size
        SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size
        SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range
        SCREAMING_SNAKE_CASE__ : Dict = num_labels
        SCREAMING_SNAKE_CASE__ : Optional[Any] = num_choices
        SCREAMING_SNAKE_CASE__ : Optional[int] = scope

    def _a ( self ) -> Optional[Any]:
        """Build random input_ids / attention mask / labels plus a config."""
        SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE__ : int = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE__ : int = None
        SCREAMING_SNAKE_CASE__ : Optional[Any] = None
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
        SCREAMING_SNAKE_CASE__ : Dict = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size] , self.num_choices )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _a ( self ) -> str:
        """Return a FalconConfig built from the tester settings."""
        return FalconConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=_a , )

    def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]:
        """Run FalconModel forward (with and without mask) and check the output shape."""
        SCREAMING_SNAKE_CASE__ : Dict = FalconModel(config=_a )
        model.to(_a )
        model.eval()
        SCREAMING_SNAKE_CASE__ : str = model(_a , attention_mask=_a )
        SCREAMING_SNAKE_CASE__ : Any = model(_a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> int:
        """Run FalconModel with encoder hidden states (cross-attention decoder) and check shape."""
        SCREAMING_SNAKE_CASE__ : Dict = True
        SCREAMING_SNAKE_CASE__ : Dict = FalconModel(_a )
        model.to(_a )
        model.eval()
        SCREAMING_SNAKE_CASE__ : int = model(
            _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
        SCREAMING_SNAKE_CASE__ : List[str] = model(
            _a , attention_mask=_a , encoder_hidden_states=_a , )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_a , attention_mask=_a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> List[str]:
        """Run FalconForCausalLM with labels and check the logits shape."""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = FalconForCausalLM(config=_a )
        model.to(_a )
        model.eval()
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a , attention_mask=_a , labels=_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Union[str, Any]:
        """Check that cached decoding (past_key_values) matches a full forward pass."""
        SCREAMING_SNAKE_CASE__ : Dict = True
        SCREAMING_SNAKE_CASE__ : Dict = True
        SCREAMING_SNAKE_CASE__ : Optional[int] = FalconForCausalLM(config=_a )
        model.to(_a )
        model.eval()
        # first forward pass
        SCREAMING_SNAKE_CASE__ : int = model(
            _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , use_cache=_a , )
        SCREAMING_SNAKE_CASE__ : str = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        SCREAMING_SNAKE_CASE__ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
        SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
        SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
        SCREAMING_SNAKE_CASE__ : Tuple = model(
            _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , output_hidden_states=_a , )["""hidden_states"""][0]
        SCREAMING_SNAKE_CASE__ : Tuple = model(
            _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , past_key_values=_a , output_hidden_states=_a , )["""hidden_states"""][0]
        # select random slice
        SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        SCREAMING_SNAKE_CASE__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
        SCREAMING_SNAKE_CASE__ : str = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(_a , _a , atol=1E-3 ) )

    def _a ( self ) -> List[Any]:
        """Split prepared config/inputs into (config, inputs_dict) for the common tests."""
        SCREAMING_SNAKE_CASE__ : int = self.prepare_config_and_inputs()
        # NOTE(review): annotated assignment to a tuple target below is not
        # valid Python — another artifact of the identifier mangling.
        (
            (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) ,
        ) : Tuple = config_and_inputs
        SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __a (UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
    '''Common test suite for the Falcon model family (base model plus the causal-LM,
    sequence-classification, token-classification and question-answering heads).

    NOTE(review): this class went through an automated identifier rewrite — every
    class attribute is assigned to the same mangled name `_SCREAMING_SNAKE_CASE`
    (each assignment overwrites the previous one), every method is named `_a`
    (earlier definitions are shadowed), locals are bound to `SCREAMING_SNAKE_CASE__`
    and then read back under their original names (`self.model_tester`, `result`,
    `past_kv`, ...), and several call sites pass the method object `_a` where a
    value such as `torch_device`, `FalconConfig` or a bool was clearly intended.
    The original identifiers must be restored before this suite can run.
    '''
    # presumably `all_model_classes` in the original source
    _SCREAMING_SNAKE_CASE :List[Any] = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # presumably `all_generative_model_classes` (read back below via
    # `self.all_generative_model_classes`)
    _SCREAMING_SNAKE_CASE :Any = (FalconForCausalLM,) if is_torch_available() else ()
    # presumably `pipeline_model_mapping`
    _SCREAMING_SNAKE_CASE :List[Any] = (
        {
            """feature-extraction""": FalconModel,
            """text-classification""": FalconForSequenceClassification,
            """text-generation""": FalconForCausalLM,
            """question-answering""": FalconForQuestionAnswering,
            """token-classification""": FalconForTokenClassification,
            """zero-shot""": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # presumably boolean feature flags (e.g. `test_headmasking` / `test_pruning`)
    # -- TODO confirm against the original source
    _SCREAMING_SNAKE_CASE :List[str] = False
    _SCREAMING_SNAKE_CASE :str = False
    def _a ( self ) -> int:
        """Instantiate the shared model tester and config tester used by the tests below.

        NOTE(review): the two assignments bind a mangled local, yet later methods
        read `self.model_tester` / `self.config_tester` -- the originals presumably
        assigned those attributes, and `config_class=_a` was presumably
        `config_class=FalconConfig`.
        """
        SCREAMING_SNAKE_CASE__ : int = FalconModelTester(self )
        SCREAMING_SNAKE_CASE__ : List[str] = ConfigTester(self , config_class=_a , hidden_size=37 )
    def _a ( self ) -> int:
        """Run the common configuration sanity checks."""
        self.config_tester.run_common_tests()
    def _a ( self ) -> str:
        """Smoke-test the base model on the tester's standard inputs."""
        SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_a )
    def _a ( self ) -> str:
        """Exercise the base model both with and without ALiBi position biases."""
        SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            # presumably this set `config.alibi = alibi` -- TODO confirm
            SCREAMING_SNAKE_CASE__ : str = alibi
            self.model_tester.create_and_check_model(_a , *_a )
    def _a ( self ) -> Union[str, Any]:
        """Sequence-classification head: logits must be (batch_size, num_labels)."""
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE__ : List[Any] = 3
        SCREAMING_SNAKE_CASE__ : Optional[Any] = input_dict["""input_ids"""]
        SCREAMING_SNAKE_CASE__ : str = input_ids.ne(1 ).to(_a )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = FalconForSequenceClassification(_a )
        model.to(_a )
        model.eval()
        SCREAMING_SNAKE_CASE__ : List[str] = model(_a , attention_mask=_a , labels=_a )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def _a ( self ) -> List[Any]:
        """Sequence classification with `problem_type="single_label_classification"`."""
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = 3
        SCREAMING_SNAKE_CASE__ : Optional[Any] = """single_label_classification"""
        SCREAMING_SNAKE_CASE__ : List[str] = input_dict["""input_ids"""]
        SCREAMING_SNAKE_CASE__ : Tuple = input_ids.ne(1 ).to(_a )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        SCREAMING_SNAKE_CASE__ : List[Any] = FalconForSequenceClassification(_a )
        model.to(_a )
        model.eval()
        SCREAMING_SNAKE_CASE__ : str = model(_a , attention_mask=_a , labels=_a )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def _a ( self ) -> List[str]:
        """Round-trip the KV cache through Falcon's RW format and back, checking
        tensor ranks and that the values survive the conversion unchanged."""
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE__ : Optional[int] = input_dict["""input_ids"""]
        SCREAMING_SNAKE_CASE__ : int = FalconForCausalLM(_a )
        model.to(_a )
        model.eval()
        SCREAMING_SNAKE_CASE__ : Dict = model(_a , use_cache=_a )
        SCREAMING_SNAKE_CASE__ : str = input_ids.shape[0]
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = model._convert_to_rw_cache(result.past_key_values )
        SCREAMING_SNAKE_CASE__ : Any = model._convert_cache_to_standard_format(_a , _a )
        for layer in range(len(_a ) ):
            for tensor_idx in range(2 ):
                # RW cache tensors are rank-3, the standard format is rank-4
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
    def _a ( self ) -> str:
        """Sequence classification with `problem_type="multi_label_classification"`."""
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE__ : List[str] = 3
        SCREAMING_SNAKE_CASE__ : str = """multi_label_classification"""
        SCREAMING_SNAKE_CASE__ : int = input_dict["""input_ids"""]
        SCREAMING_SNAKE_CASE__ : List[str] = input_ids.ne(1 ).to(_a )
        SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        SCREAMING_SNAKE_CASE__ : int = FalconForSequenceClassification(_a )
        model.to(_a )
        model.eval()
        SCREAMING_SNAKE_CASE__ : List[Any] = model(_a , attention_mask=_a , labels=_a )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def _a ( self ) -> List[str]:
        """Check the layout of `past_key_values` returned by each generative model
        class: one (key, value) pair per layer, each of shape
        (batch, kv_heads, seq_len, head_dim)."""
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(_a , """use_cache""" ):
                return
            SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(_a ).to(_a )
            if "use_cache" not in inputs:
                SCREAMING_SNAKE_CASE__ : Optional[Any] = True
            SCREAMING_SNAKE_CASE__ : Tuple = model(**_a )
            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return
            # layer count: fall back through the config attribute aliases
            SCREAMING_SNAKE_CASE__ : Dict = (
                getattr(_a , """decoder_layers""" , _a )
                or getattr(_a , """num_decoder_layers""" , _a )
                or config.num_hidden_layers
            )
            SCREAMING_SNAKE_CASE__ : Any = getattr(_a , """num_kv_heads""" , config.num_attention_heads )
            SCREAMING_SNAKE_CASE__ : str = getattr(_a , """d_model""" , config.hidden_size )
            SCREAMING_SNAKE_CASE__ : str = embed_dim // num_attention_heads
            SCREAMING_SNAKE_CASE__ : List[Any] = outputs["""past_key_values"""]
            self.assertEqual(len(_a ) , _a )
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = inputs["""input_ids"""].shape
            for i in range(_a ):
                if config.new_decoder_architecture:
                    # new decoder architecture caches the full head count
                    SCREAMING_SNAKE_CASE__ : Any = config.num_attention_heads
                elif config.multi_query:
                    # multi-query attention keeps a single shared KV head
                    SCREAMING_SNAKE_CASE__ : Optional[Any] = 1
                self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
                self.assertEqual(
                    past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class __a (unittest.TestCase):
    '''Slow integration tests that run real Falcon checkpoints end to end.

    NOTE(review): identifiers are mangled here as well -- locals are assigned to
    `SCREAMING_SNAKE_CASE__` but read back under their original names (`model`,
    `tokenizer`, `outputs_cache`, ...), and `_a` (the method object) is passed
    where `torch_device`, `False`/`True` or the tokenized inputs were clearly
    intended.  Restore the original names before running.
    '''
    @slow
    def _a ( self ) -> Dict:
        """Greedy generation from falcon-rw-1b must reproduce a known completion."""
        SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
        SCREAMING_SNAKE_CASE__ : Tuple = FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
        model.eval()
        model.to(_a )
        SCREAMING_SNAKE_CASE__ : Tuple = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(_a )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
            """My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."""
        )
        SCREAMING_SNAKE_CASE__ : Any = model.generate(**_a , do_sample=_a , max_new_tokens=19 )
        SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.batch_decode(_a )[0]
        self.assertEqual(_a , _a )
    @slow
    def _a ( self ) -> Union[str, Any]:
        """Smoke-test greedy, sampled and beam-search generation on tiny random checkpoints."""
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(_a )
            SCREAMING_SNAKE_CASE__ : Optional[int] = FalconForCausalLM.from_pretrained(_a )
            model.eval()
            model.to(_a )
            SCREAMING_SNAKE_CASE__ : str = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(_a )
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**_a , do_sample=_a , max_new_tokens=4 )
            model.generate(**_a , do_sample=_a , max_new_tokens=4 )
            model.generate(**_a , num_beams=2 , max_new_tokens=4 )
    @slow
    def _a ( self ) -> Optional[int]:
        """Generation with and without the KV cache must produce identical tokens."""
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained(_a )
                SCREAMING_SNAKE_CASE__ : int = FalconForCausalLM.from_pretrained(_a )
                model.eval()
                model.to(device=_a )
                SCREAMING_SNAKE_CASE__ : List[str] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(_a )
                # Test results are the same with and without cache
                SCREAMING_SNAKE_CASE__ : Tuple = model.generate(**_a , do_sample=_a , max_new_tokens=20 , use_cache=_a )
                SCREAMING_SNAKE_CASE__ : Tuple = model.generate(**_a , do_sample=_a , max_new_tokens=20 , use_cache=_a )
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 132 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class __a (UpperCamelCase_):
    '''Output container for the semantic Stable Diffusion pipeline.

    NOTE(review): both fields below are annotated under the same mangled name
    `_SCREAMING_SNAKE_CASE`, so the second annotation overwrites the first and the
    dataclass ends up with a single field.  The originals were presumably
    `images` and `nsfw_content_detected` -- confirm against upstream and restore.
    '''
    # presumably `images`: the generated images, as PIL images or a numpy array
    _SCREAMING_SNAKE_CASE :Union[List[PIL.Image.Image], np.ndarray]
    # presumably `nsfw_content_detected`: per-image safety-checker flags (or None)
    _SCREAMING_SNAKE_CASE :Optional[List[bool]]
# The pipeline itself needs both transformers and torch; only export it when
# both are installed.
if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 132 | 1 |
# Adjacency list of the DAG and the vertex universe used by `topological_sort`.
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Depth-first topological sort of the module-level graph ``edges``.

    Args:
        start: vertex to begin the traversal from.
        visited: accumulator of vertices already visited (mutated in place).
        sort: accumulator of the post-order built so far.

    Returns:
        The list of all vertices, children before parents (reverse topological
        order of ``edges``).
    """
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
| 365 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
snake_case : List[Any] = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class _snake_case ( TokenizerTesterMixin , unittest.TestCase ):
    """Test suite for ``BartphoTokenizer``.

    Uses the shared SentencePiece BPE fixture plus a five-token monolingual
    vocabulary written to a temporary directory in ``setUp``.
    """

    # TokenizerTesterMixin configuration
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        """Write a small monolingual vocab file and save a tokenizer built from it."""
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        """Reload the tokenizer saved in ``setUp`` with the special-tokens map applied."""
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Return a (raw, expected-decoded) text pair for the common round-trip tests."""
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and token->id conversion on a known sentence."""
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        # "là" is not in the monolingual vocab, so both pieces map to <unk> (id 3)
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 281 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A( a ):
    """Processor that wraps a ChineseCLIP image processor and a BERT tokenizer into a
    single object, exposing both the tokenizer and the image-processor behaviour.
    """

    # ProcessorMixin contract: attributes to save/load and their backing classes.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Build the processor; `feature_extractor` is accepted as a deprecated alias
        for `image_processor`."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Prepare `text` and/or `images` for the model; at least one is required.

        Returns the tokenizer encoding (with `pixel_values` merged in when images
        are also given), or a `BatchEncoding` of image features when only images
        are given.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of the tokenizer's and image processor's input names, de-duplicated
        while preserving order."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# Local file names used when saving / loading the tokenizer.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

# Remote locations of the pretrained vocabulary files, keyed by checkpoint name.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

# Maximum input lengths (positional-embedding sizes) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class __a ( A__ ):
    """Fast Blenderbot tokenizer (backed by HuggingFace *tokenizers*), derived from
    the GPT-2 byte-level BPE tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        """Build the fast tokenizer and force the backend pre-tokenizer / post-processor
        to agree with `add_prefix_space` and `trim_offsets`."""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Rebuild the pre-tokenizer if its serialized state disagrees with the flag.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self):
        """The mask token as a string, or None (with an error log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        """Set the mask token, stripping the space before it (lstrip) so it behaves
        like a word-initial token."""
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Save the backend model files into `save_directory` and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Blenderbot does not use token type ids: return all zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Append the EOS token; the second sequence is ignored by Blenderbot."""
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation"):
        """Flatten a Conversation into input ids, trimming from the left if too long."""
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
# Fixture data: total claim vector, current allocation table, and maximum claim
# table for five processes competing over four resource kinds.
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class snake_case__ :
    """Banker's algorithm for deadlock avoidance.

    Holds the system claim vector plus the per-process allocation and maximum
    claim tables, and simulates executing processes whose outstanding need fits
    in the currently available resources.
    """

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        """Store the claim vector and the allocation / maximum-claim tables."""
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum, per resource kind, what is currently allocated across all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still free: the claim vector minus everything allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Per-process outstanding need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each need row's index back to the need row itself, so a row can be
        traced to its original process after removals."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the Banker's algorithm: repeatedly execute any process whose need fits
        in the available resources, freeing its allocation, until every process has
        run or the system is found to be in an unsafe state.
        """
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources]))
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Print the allocation / maximum tables and the resource vectors."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n")
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n")
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector))
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 350 |
'''simple docstring'''
from __future__ import annotations
A__ : str = '''Muhammad Umer Farooq'''
A__ : int = '''MIT'''
A__ : Optional[int] = '''1.0.0'''
A__ : List[Any] = '''Muhammad Umer Farooq'''
A__ : Optional[Any] = '''contact@muhammadumerfarooq.me'''
A__ : Optional[Any] = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Union[str, Any] , __a : str ) -> None:
'''simple docstring'''
super().__init__()
__snake_case : list[str] = []
__snake_case : Dict = domain
def A_ ( self : Dict , __a : str , __a : list[tuple[str, str | None]] ) -> None:
'''simple docstring'''
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
__snake_case : Optional[Any] = parse.urljoin(self.domain , __a )
self.urls.append(__a )
def get_domain_name(url: str) -> str:
    """Return the registrable domain (last two labels) of *url*'s host."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location (host[:port]) of *url*."""
    return parse.urlparse(url).netloc
def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Crawl *url*, follow every anchor found on the page, and collect e-mail
    addresses whose domain matches the page's domain.

    Returns:
        A sorted, de-duplicated list of addresses.  A ValueError on the top-level
        request aborts the program; errors on individual links are skipped.
    """
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(url)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
| 0 | 0 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
_UpperCAmelCase = Vector()
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(_SCREAMING_SNAKE_CASE ) , '(0,0,0,0,0,1)' )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3, 4] )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 4 )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2] )
_UpperCAmelCase = Vector([1, 2, 3, 4, 5] )
_UpperCAmelCase = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
_UpperCAmelCase = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3] )
_UpperCAmelCase = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3] )
_UpperCAmelCase = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3] )
_UpperCAmelCase = Vector([2, -1, 4] ) # for test of dot product
_UpperCAmelCase = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' )
self.assertEqual((a * b) , 0 )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3] )
_UpperCAmelCase = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) , '(3,4,7)' )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Vector([1, 0, 0, 0, 0, 0] )
_UpperCAmelCase = x.copy()
self.assertEqual(str(_SCREAMING_SNAKE_CASE ) , str(_SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(_SCREAMING_SNAKE_CASE ) , '(0,1,0)' )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_UpperCAmelCase = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_UpperCAmelCase = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
_UpperCAmelCase = Vector([1, 2, 3] )
self.assertEqual('(14,32,50)' , str(a * x ) )
self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_UpperCAmelCase = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_UpperCAmelCase = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) )
def UpperCAmelCase__ ( self ) -> None:
    """square_zero_matrix(5) renders as five rows of five zeros."""
    expected = '|0,0,0,0,0|\n' * 5
    self.assertEqual(expected, str(square_zero_matrix(5)))
if __name__ == "__main__":
    # Run all the Matrix/Vector unit tests above when executed as a script.
    unittest.main()
| 329 | from maths.prime_check import is_prime
def __lowercase ( lowerCamelCase : int ):
    """Return the twin prime of *lowerCamelCase* (i.e. lowerCamelCase + 2) when
    both numbers are prime, otherwise -1.

    Raises:
        TypeError: if the input is not an integer.
    """
    # The original called isinstance(x, x) and formatted/added an undefined
    # `number`; both were obfuscation damage — validate and use the parameter.
    if not isinstance(lowerCamelCase, int):
        UpperCamelCase_: str = f"Input value of [number={lowerCamelCase}] must be an integer"
        raise TypeError(UpperCamelCase_)
    if is_prime(lowerCamelCase) and is_prime(lowerCamelCase + 2):
        return lowerCamelCase + 2
    else:
        return -1
if __name__ == "__main__":
    # Exercise the docstring examples (if any) of this module.
    import doctest
    doctest.testmod()
| 175 | 0 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
# Route everything at DEBUG level to stdout so subprocess test output is visible.
logging.basicConfig(level=logging.DEBUG)
# NOTE(review): the two assignments below bind `SCREAMING_SNAKE_CASE`, yet the
# last line reads `logger` and `stream_handler` — the names were presumably
# mangled from `logger = logging.getLogger()` / `stream_handler = ...`; confirm
# against the upstream RAG example test module.
SCREAMING_SNAKE_CASE : Any = logging.getLogger()
SCREAMING_SNAKE_CASE : Dict = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCamelCase ( lowercase__ ):
    # End-to-end tests for the RAG finetuning example: build a tiny on-disk
    # dataset, launch finetune_rag.py in a subprocess, and check the EM metric.
    # NOTE(review): the base class `lowercase__` is undefined here — from the
    # imports it is presumably TestCasePlus; confirm.
    # NOTE(review): every method below carries the mangled name `UpperCamelCase`,
    # so later definitions shadow earlier ones; the originals were presumably
    # `_create_dummy_data`, `_run_finetune` and four `test_finetune_*` methods.
    '''simple docstring'''

    def UpperCamelCase ( self , UpperCamelCase_ ):
        # Create {train,val,test}.{source,target} files with trivial content.
        # NOTE(review): `exist_ok=UpperCamelCase_` passes the path (truthy)
        # where a boolean is expected — harmless but presumably mangled.
        os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
        # NOTE(review): locals below were renamed to `lowercase_` but are read
        # back as `contents` / `n_lines`; restore from the upstream module.
        lowercase_ :List[Any] = {'''source''': '''What is love ?''', '''target''': '''life'''}
        lowercase_ :Optional[int] = {'''train''': 12, '''val''': 2, '''test''': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                lowercase_ :Dict = '''\n'''.join([contents[field]] * n_lines[split] )
                with open(os.path.join(UpperCamelCase_ , f"{split}.{field}" ) , '''w''' ) as f:
                    f.write(UpperCamelCase_ )

    def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = "pytorch" ):
        # Launch the finetuning script asynchronously and return the metrics
        # written to <output_dir>/metrics.json.
        # NOTE(review): duplicate parameter names above are a SyntaxError as
        # written; originally `(self, gpus, distributed_retriever="pytorch")`.
        lowercase_ :List[str] = self.get_auto_remove_tmp_dir()
        lowercase_ :str = os.path.join(UpperCamelCase_ , '''output''' )
        lowercase_ :str = os.path.join(UpperCamelCase_ , '''data''' )
        self._create_dummy_data(data_dir=UpperCamelCase_ )
        # The full CLI for finetune_rag.py, split into argv tokens.
        lowercase_ :Any = f"\n        --data_dir {data_dir} \\n        --output_dir {output_dir} \\n        --model_name_or_path facebook/rag-sequence-base \\n        --model_type rag_sequence \\n        --do_train \\n        --do_predict \\n        --n_val -1 \\n        --val_check_interval 1.0 \\n        --train_batch_size 2 \\n        --eval_batch_size 1 \\n        --max_source_length 25 \\n        --max_target_length 25 \\n        --val_max_target_length 25 \\n        --test_max_target_length 25 \\n        --label_smoothing 0.1 \\n        --dropout 0.1 \\n        --attention_dropout 0.1 \\n        --weight_decay 0.001 \\n        --adam_epsilon 1e-08 \\n        --max_grad_norm 0.1 \\n        --lr_scheduler polynomial \\n        --learning_rate 3e-04 \\n        --num_train_epochs 1 \\n        --warmup_steps 4 \\n        --gradient_accumulation_steps 1 \\n        --distributed-port 8787 \\n        --use_dummy_dataset 1 \\n        --distributed_retriever {distributed_retriever} \\n        ".split()
        if gpus > 0:
            testargs.append(f"--gpus={gpus}" )
            # fp16 only when apex is installed on the GPU path.
            if is_apex_available():
                testargs.append('''--fp16''' )
        else:
            testargs.append('''--gpus=0''' )
            testargs.append('''--distributed_backend=ddp_cpu''' )
            testargs.append('''--num_processes=2''' )
        lowercase_ :Tuple = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(UpperCamelCase_ , env=self.get_env() )
        lowercase_ :Dict = os.path.join(UpperCamelCase_ , '''metrics.json''' )
        with open(UpperCamelCase_ ) as f:
            lowercase_ :Optional[int] = json.load(UpperCamelCase_ )
        return result

    @require_torch_gpu
    def UpperCamelCase ( self ):
        # Single-GPU run with the default (pytorch) retriever.
        lowercase_ :str = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )

    @require_torch_multi_gpu
    def UpperCamelCase ( self ):
        # Multi-GPU run with the default retriever.
        lowercase_ :int = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )

    @require_torch_gpu
    @require_ray
    def UpperCamelCase ( self ):
        # Single-GPU run with the Ray distributed retriever.
        lowercase_ :Dict = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )

    @require_torch_multi_gpu
    @require_ray
    def UpperCamelCase ( self ):
        # Multi-GPU variant of the Ray-retriever test (note: still gpus=1 here,
        # matching the upstream example).
        lowercase_ :Tuple = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
| 252 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
    # Launch a transformers example script on a Runhouse-managed cluster.
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    # NOTE(review): the `SCREAMING_SNAKE_CASE` assignments below are mangled
    # names — the rest of the script reads `parser`, `args`, `unknown`,
    # `cluster` and `example_dir`; confirm against the upstream launcher.
    SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = parser.parse_known_args()
    # A non-default host means "bring your own" cluster; mixing it with
    # on-demand instance/provider flags is rejected.
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        SCREAMING_SNAKE_CASE : int = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        SCREAMING_SNAKE_CASE : Optional[Any] = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    # Directory of the chosen example, used to install its requirements.
    SCREAMING_SNAKE_CASE : Any = args.example.rsplit("/", 1)[0]
    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class A__ :
    """A binary-tree node: a payload plus optional left/right children.

    The obfuscation collapsed the three annotated fields into repeated
    `lowercase = ...` assignments, which left the dataclass with no fields at
    all (so `A__(1)` raised).  The traversal helpers in this module refer to
    the type as ``Node`` — presumably its original name.
    """

    data: int
    left: A__ | None = None
    right: A__ | None = None
def UpperCAmelCase__ ( ) -> Node | None:
    """Build the fixed demo tree  1(2(4, 5), 3)  used by main() below.

    The original body bound five unconnected nodes to one mangled local and
    returned an undefined `tree`; the wiring below follows the standard
    upstream version of this module.  Uses the node class defined in this
    file (named ``A__`` after obfuscation).
    """
    tree = A__(1)
    tree.left = A__(2)
    tree.right = A__(3)
    tree.left.left = A__(4)
    tree.left.right = A__(5)
    return tree
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[int]:
    # Pre-order traversal (root, left subtree, right subtree).
    # NOTE(review): the body reads `root` and recurses via `preorder`, but the
    # parameter and function were both renamed by obfuscation — as written this
    # raises NameError; restore the names `preorder(root)` from upstream.
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[int]:
    # Post-order traversal (left subtree, right subtree, root).
    # NOTE(review): `root`/`postorder` are not defined under the obfuscated
    # names — NameError as written; restore `postorder(root)` from upstream.
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[int]:
    # In-order traversal (left subtree, root, right subtree).
    # NOTE(review): `root`/`inorder` are not defined under the obfuscated
    # names — NameError as written; restore `inorder(root)` from upstream.
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
    # Height of the tree: 0 for an empty tree, else 1 + max child height.
    # NOTE(review): `root`/`height` are not defined under the obfuscated
    # names — NameError as written; restore `height(root)` from upstream.
    return (max(height(root.left ), height(root.right ) ) + 1) if root else 0
def UpperCAmelCase__ ( root ) -> Sequence[Node | None]:
    """Breadth-first (level-order) traversal; returns node payloads top-down,
    left-to-right.  Returns [] for an empty tree.

    The parameter had been renamed so the body's `root` no longer resolved;
    restoring the name fixes the NameError without changing positional callers.
    """
    output = []
    if root is None:
        return output
    process_queue = deque([root])  # FIFO frontier of nodes still to visit
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def UpperCAmelCase__ ( root, level ) -> Sequence[Node | None]:
    """Collect payloads of all nodes on *level* (1-based), left to right.

    The original signature repeated one mangled parameter name twice — a
    SyntaxError; the body's own reads (`root`, `level`) fix the intent.
    """
    output = []

    def populate_output(node, level) -> None:
        # Descend until the requested level is reached, then emit.
        if not node:
            return
        if level == 1:
            output.append(node.data)
        elif level > 1:
            populate_output(node.left, level - 1)
            populate_output(node.right, level - 1)

    populate_output(root, level)
    return output
def UpperCAmelCase__ ( root, level ) -> Sequence[Node | None]:
    """Collect payloads of all nodes on *level* (1-based), right to left.

    Mirror of the left-to-right variant (right child visited first).  The
    original signature repeated one mangled parameter name — a SyntaxError.
    """
    output = []

    def populate_output(node, level) -> None:
        if node is None:
            return
        if level == 1:
            output.append(node.data)
        elif level > 1:
            populate_output(node.right, level - 1)
            populate_output(node.left, level - 1)

    populate_output(root, level)
    return output
def UpperCAmelCase__ ( root ) -> Sequence[Node | None] | list[Any]:
    """Zig-zag level order: odd levels left-to-right, even levels right-to-left.

    Relies on the sibling helpers ``height``, ``get_nodes_from_left_to_right``
    and ``get_nodes_from_right_to_left`` defined in this module (their
    obfuscated def-names no longer match — restore them together).  The
    parameter had been renamed so the body's `root` no longer resolved.
    """
    if root is None:
        return []
    output = []
    flag = 0  # 0 -> emit next level left-to-right, 1 -> right-to-left
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def UpperCAmelCase__ ( ) -> None:  # Main function for testing.
    # Demo driver: builds the sample tree and prints every traversal.
    # NOTE(review): the tree is bound to the mangled `A_` while every call
    # below passes `UpperCAmelCase__` (which at module scope is a *function*,
    # not the tree), and `make_tree`/`inorder`/... no longer match the
    # obfuscated def-names — this whole module's identifiers need restoring
    # together from the upstream source.
    A_ = make_tree()
    print(F'''In-order Traversal: {inorder(UpperCAmelCase__ )}''' )
    print(F'''Pre-order Traversal: {preorder(UpperCAmelCase__ )}''' )
    print(F'''Post-order Traversal: {postorder(UpperCAmelCase__ )}''', """\n""" )
    print(F'''Height of Tree: {height(UpperCAmelCase__ )}''', """\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(UpperCAmelCase__ ), """\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1, height(UpperCAmelCase__ ) + 1 ):
        print(F'''Level {level}:''', get_nodes_from_left_to_right(UpperCAmelCase__, level=UpperCAmelCase__ ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(UpperCAmelCase__ ) )


if __name__ == "__main__":
    # Validate docstring examples, then run the demo.
    import doctest

    doctest.testmod()
    main()
| 162 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
__lowerCamelCase = getLogger(__name__)
__lowerCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = 8, UpperCAmelCase__ = DEFAULT_DEVICE, UpperCAmelCase__=False, UpperCAmelCase__="summarization", UpperCAmelCase__=None, **UpperCAmelCase__, ) -> Dict:
    # Run batched seq2seq generation over a list of examples and stream one
    # decoded hypothesis per line to an output file; returns timing stats.
    # NOTE(review): obfuscation collapsed the parameters (examples, out_file,
    # model_name, batch_size, device, fp16, task, prefix) into one name and
    # every local into `A_`, while the body reads `fout`, `model`, `tokenizer`,
    # `start_time`, `batch`, `summaries`, `dec`, `runtime`, `n_obs` — restore
    # from the upstream run_eval.py.
    # NOTE(review): `A_ = prefix or getattr(...)` was presumably
    # `prefix = prefix or ...`; as mangled, `prefix` would stay None and
    # `prefix + text` below would raise.
    A_ = Path(UpperCAmelCase__ ).open("""w""", encoding="""utf-8""" )
    A_ = str(UpperCAmelCase__ )
    A_ = AutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase__ ).to(UpperCAmelCase__ )
    if fpaa:
        # Half precision for faster GPU inference.
        A_ = model.half()
    A_ = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
    logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' )  # if this is wrong, check config.model_type.
    A_ = time.time()
    # update config with task specific params
    use_task_specific_params(UpperCAmelCase__, UpperCAmelCase__ )
    if prefix is None:
        A_ = prefix or getattr(model.config, """prefix""", """""" ) or """"""
    for examples_chunk in tqdm(list(chunks(UpperCAmelCase__, UpperCAmelCase__ ) ) ):
        A_ = [prefix + text for text in examples_chunk]
        A_ = tokenizer(UpperCAmelCase__, return_tensors="""pt""", truncation=UpperCAmelCase__, padding="""longest""" ).to(UpperCAmelCase__ )
        A_ = model.generate(
            input_ids=batch.input_ids, attention_mask=batch.attention_mask, **UpperCAmelCase__, )
        A_ = tokenizer.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__, clean_up_tokenization_spaces=UpperCAmelCase__ )
        for hypothesis in dec:
            fout.write(hypothesis + """\n""" )
            fout.flush()
    fout.close()
    A_ = int(time.time() - start_time )  # seconds
    A_ = len(UpperCAmelCase__ )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4 )}
def UpperCAmelCase__ ( ) -> Optional[int]:
    """Current local time rendered as 'YYYY-MM-DD HH:MM:SS' (run tag)."""
    return datetime.datetime.now().isoformat(sep=" ", timespec="seconds")
def UpperCAmelCase__ ( UpperCAmelCase__=True ) -> Any:
    # CLI entry point: parse args, run generation over an input file, then
    # (optionally) score the output against a reference file and dump metrics.
    # NOTE(review): obfuscation renamed the parameter (`verbose`) and all
    # locals (`parser`, `args`, `rest`, `parsed_args`, `examples`, `runtime_metrics`,
    # `score_fn`, `output_lns`, `reference_lns`, `scores`) to `A_` — the body
    # reads the originals; restore from upstream run_eval.py.
    # NOTE(review): `args.fpaa` below does not match the `--fp16` flag's dest
    # (`fp16`), and the two bare `scores.update(UpperCAmelCase__)` calls lost
    # their arguments (`parsed_args`, then `vars(args)` resp.) — confirm.
    A_ = argparse.ArgumentParser()
    parser.add_argument("""model_name""", type=UpperCAmelCase__, help="""like facebook/bart-large-cnn,t5-base, etc.""" )
    parser.add_argument("""input_path""", type=UpperCAmelCase__, help="""like cnn_dm/test.source""" )
    parser.add_argument("""save_path""", type=UpperCAmelCase__, help="""where to save summaries""" )
    parser.add_argument("""--reference_path""", type=UpperCAmelCase__, required=UpperCAmelCase__, help="""like cnn_dm/test.target""" )
    parser.add_argument("""--score_path""", type=UpperCAmelCase__, required=UpperCAmelCase__, default="""metrics.json""", help="""where to save metrics""" )
    parser.add_argument("""--device""", type=UpperCAmelCase__, required=UpperCAmelCase__, default=UpperCAmelCase__, help="""cuda, cuda:1, cpu etc.""" )
    parser.add_argument(
        """--prefix""", type=UpperCAmelCase__, required=UpperCAmelCase__, default=UpperCAmelCase__, help="""will be added to the begininng of src examples""" )
    parser.add_argument("""--task""", type=UpperCAmelCase__, default="""summarization""", help="""used for task_specific_params + metrics""" )
    parser.add_argument("""--bs""", type=UpperCAmelCase__, default=8, required=UpperCAmelCase__, help="""batch size""" )
    parser.add_argument(
        """--n_obs""", type=UpperCAmelCase__, default=-1, required=UpperCAmelCase__, help="""How many observations. Defaults to all.""" )
    parser.add_argument("""--fp16""", action="""store_true""" )
    parser.add_argument("""--dump-args""", action="""store_true""", help="""print the custom hparams with the results""" )
    parser.add_argument(
        """--info""", nargs="""?""", type=UpperCAmelCase__, const=datetime_now(), help=(
            """use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."""
            """ lang=en-ru. If no value is passed, the current datetime string will be used."""
        ), )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    A_ , A_ = parser.parse_known_args()
    A_ = parse_numeric_n_bool_cl_kwargs(UpperCAmelCase__ )
    if parsed_args and verbose:
        print(F'''parsed the following generate kwargs: {parsed_args}''' )
    # T5 checkpoints expect a leading space on each source line.
    A_ = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        A_ = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=UpperCAmelCase__ )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(F'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' )
    if args.device == "cpu" and args.fpaa:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("""Can't mix --fp16 and --device cpu""" )
    A_ = generate_summaries_or_translations(
        UpperCAmelCase__, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fpaa=args.fpaa, task=args.task, prefix=args.prefix, **UpperCAmelCase__, )
    if args.reference_path is None:
        return {}
    # Compute scores
    A_ = calculate_bleu if """translation""" in args.task else calculate_rouge
    A_ = [x.rstrip() for x in open(args.save_path ).readlines()]
    A_ = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(UpperCAmelCase__ )]
    A_ = score_fn(UpperCAmelCase__, UpperCAmelCase__ )
    scores.update(UpperCAmelCase__ )
    if args.dump_args:
        scores.update(UpperCAmelCase__ )
    if args.info:
        A_ = args.info
    if verbose:
        print(UpperCAmelCase__ )
    if args.score_path is not None:
        json.dump(UpperCAmelCase__, open(args.score_path, """w""" ) )
    return scores
if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    # NOTE(review): `run_generate` does not match any obfuscated def-name
    # above — the entry point was presumably the last `UpperCAmelCase__`.
    run_generate(verbose=True)
| 162 | 1 |
'''simple docstring'''
import contextlib
import os
import sqlite3  # standard-library SQLite driver; used by iter_sql_file
import sqlitea

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def lowerCAmelCase_ ( dataset , expected_features ):
    """Shared assertions for a Dataset read from the SQL fixture table.

    The original signature repeated one mangled parameter name twice (a
    SyntaxError) and its first assertion compared that name against itself;
    the body's reads (`dataset`, `expected_features`) recover the intent.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
    # Reads the fixture SQLite table with and without keep_in_memory and
    # checks the resulting Dataset shape/schema via _check_sql_dataset.
    # NOTE(review): all four parameters share the mangled name `snake_case__`
    # (a SyntaxError as written); the body reads `tmp_path`, `sqlite_path`
    # and `keep_in_memory`, so the original fixture names must be restored
    # from the upstream datasets test module.
    '''simple docstring'''
    A : int = tmp_path / '''cache'''
    A : List[str] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        A : List[str] = SqlDatasetReader(
            '''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
    _check_sql_dataset(snake_case__ , snake_case__ )
@require_sqlalchemy
@pytest.mark.parametrize(
    '''features''' , [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ] , )
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
    # Reads the fixture table with each explicit `features` schema (or None)
    # and checks the resulting Dataset against the expected dtypes.
    # NOTE(review): duplicate mangled parameter names (SyntaxError as
    # written); the body reads `tmp_path`, `features` and `sqlite_path` —
    # restore the fixture names from the upstream datasets test module.
    '''simple docstring'''
    A : Tuple = tmp_path / '''cache'''
    A : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    A : Optional[Any] = features.copy() if features else default_expected_features
    A : List[str] = (
        Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
    )
    A : Optional[Any] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=snake_case__ , cache_dir=snake_case__ ).read()
    _check_sql_dataset(snake_case__ , snake_case__ )
def lowerCAmelCase_ ( snake_case__ ):
    """Yield every row of the ``dataset`` table in the SQLite file at *snake_case__*.

    Uses the standard-library ``sqlite3`` driver — the previously referenced
    ``sqlitea`` module does not exist (obfuscation of ``sqlite3``).
    The connection is closed even if iteration is abandoned early.
    """
    with contextlib.closing(sqlite3.connect(snake_case__)) as con:
        cur = con.cursor()
        cur.execute('''SELECT * FROM dataset''')
        yield from cur
@require_sqlalchemy
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
    # Round-trip: read the fixture table, write it to a new SQLite file with
    # num_proc=1, and compare the two tables row by row.
    # NOTE(review): duplicate mangled parameter names (SyntaxError as
    # written); the body reads `tmp_path` and `sqlite_path` — restore the
    # fixture names from the upstream datasets test module.
    '''simple docstring'''
    A : List[str] = tmp_path / '''cache'''
    A : str = os.path.join(snake_case__ , '''tmp.sql''' )
    A : Tuple = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case__ ).read()
    SqlDatasetWriter(snake_case__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write()
    A : int = iter_sql_file(snake_case__ )
    A : List[str] = iter_sql_file(snake_case__ )
    for rowa, rowa in zip(snake_case__ , snake_case__ ):
        assert rowa == rowa
@require_sqlalchemy
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
    # Same round-trip as above but writing with num_proc=2 (multiprocess path).
    # NOTE(review): duplicate mangled parameter names (SyntaxError as
    # written) and mangled locals, as in the sibling tests.
    '''simple docstring'''
    A : str = tmp_path / '''cache'''
    A : Dict = os.path.join(snake_case__ , '''tmp.sql''' )
    A : Optional[int] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case__ ).read()
    SqlDatasetWriter(snake_case__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write()
    A : str = iter_sql_file(snake_case__ )
    A : Dict = iter_sql_file(snake_case__ )
    for rowa, rowa in zip(snake_case__ , snake_case__ ):
        assert rowa == rowa
@require_sqlalchemy
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
    # Writing with num_proc=0 must raise (invalid process count).
    # NOTE(review): duplicate mangled parameter names (SyntaxError as
    # written); `pytest.raises(snake_case__)` lost the expected exception
    # type (ValueError upstream) — confirm before restoring.
    '''simple docstring'''
    A : Dict = tmp_path / '''cache'''
    A : Optional[int] = os.path.join(snake_case__ , '''tmp.sql''' )
    A : int = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case__ ).read()
    with pytest.raises(snake_case__ ):
        SqlDatasetWriter(snake_case__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
| 311 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding (standard transformers pattern): map each submodule
# to its public names so heavy torch code is only imported on first access.
# The obfuscated version bound this dict (and the proxy module) to throwaway
# names while `_LazyModule` was called with an undefined `_import_structure`,
# so the package could never import — restore the canonical structure.
_import_structure = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is present: also expose the modeling classes.
    _import_structure['modeling_cpmant'] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers
    # the deferred imports declared in _import_structure.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( UpperCamelCase ):
    # BridgeTower processor: wraps a BridgeTowerImageProcessor and a Roberta
    # tokenizer behind a single __call__ that returns one combined encoding.
    # NOTE(review): the three helper methods below all carry the mangled name
    # `_snake_case`, so only the last definition (the property) survives at
    # runtime; upstream they are `batch_decode`, `decode` and
    # `model_input_names` — confirm before restoring.
    '''simple docstring'''
    __A : Dict = ["image_processor", "tokenizer"]
    __A : Dict = "BridgeTowerImageProcessor"
    __A : Optional[int] = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__( self , __A , __A ):
        """simple docstring"""
        super().__init__(__A , __A )

    def __call__( self , __A , __A = None , __A = True , __A = False , __A = None , __A = None , __A = 0 , __A = None , __A = None , __A = None , __A = False , __A = False , __A = False , __A = False , __A = True , __A = None , **__A , ):
        # Tokenize the text, run the image processor, and merge both outputs
        # into one BatchEncoding (text keys + pixel_values / pixel_mask).
        """simple docstring"""
        lowerCamelCase : str = self.tokenizer(
            text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
        # add pixel_values + pixel_mask
        lowerCamelCase : int = self.image_processor(
            __A , return_tensors=__A , do_normalize=__A , do_center_crop=__A , **__A )
        encoding.update(__A )
        return encoding

    def _snake_case ( self , *__A , **__A ):
        # Forward to the tokenizer's batch_decode.
        """simple docstring"""
        return self.tokenizer.batch_decode(*__A , **__A )

    def _snake_case ( self , *__A , **__A ):
        # Forward to the tokenizer's decode.
        """simple docstring"""
        return self.tokenizer.decode(*__A , **__A )

    @property
    def _snake_case ( self ):
        # Union of tokenizer and image-processor input names, order-preserving.
        """simple docstring"""
        lowerCamelCase : List[Any] = self.tokenizer.model_input_names
        lowerCamelCase : int = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 283 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both assignments below bind `_snake_case`, so the archive map
# overwrites the logger; upstream these are `logger` and
# `MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP` — confirm before restoring.
_snake_case = logging.get_logger(__name__)

# Hub checkpoint name -> config.json URL for the released MobileNetV2 variants.
_snake_case = {
    '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
    '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
    '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
    '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCAmelCase_ ( UpperCamelCase ):
    # Configuration for MobileNetV2 models: stores the architecture
    # hyper-parameters; all arguments are plain values with upstream defaults.
    '''simple docstring'''
    __A : Dict = "mobilenet_v2"

    def __init__( self , __A=3 , __A=224 , __A=1.0 , __A=8 , __A=8 , __A=6 , __A=32 , __A=True , __A=True , __A="relu6" , __A=True , __A=0.8 , __A=0.02 , __A=0.001 , __A=255 , **__A , ):
        # Positional order (from upstream): num_channels, image_size,
        # depth_multiplier, depth_divisible_by, min_depth, expand_ratio,
        # output_stride, first_layer_is_expansion, finegrained_output,
        # hidden_act, tf_padding, classifier_dropout_prob, initializer_range,
        # layer_norm_eps, semantic_loss_ignore_index.
        # NOTE(review): the obfuscated parameters are all `__A`, while the
        # body reads the original names — restore together with the signature.
        """simple docstring"""
        super().__init__(**__A )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        lowerCamelCase : str = num_channels
        lowerCamelCase : Any = image_size
        lowerCamelCase : Union[str, Any] = depth_multiplier
        lowerCamelCase : Tuple = depth_divisible_by
        lowerCamelCase : Dict = min_depth
        lowerCamelCase : Dict = expand_ratio
        lowerCamelCase : Optional[Any] = output_stride
        lowerCamelCase : int = first_layer_is_expansion
        lowerCamelCase : Union[str, Any] = finegrained_output
        lowerCamelCase : Optional[Any] = hidden_act
        lowerCamelCase : Optional[Any] = tf_padding
        lowerCamelCase : Optional[Any] = classifier_dropout_prob
        lowerCamelCase : Dict = initializer_range
        lowerCamelCase : str = layer_norm_eps
        lowerCamelCase : Optional[Any] = semantic_loss_ignore_index
class UpperCAmelCase_ ( UpperCamelCase ):
    # ONNX export configuration for MobileNetV2.
    # NOTE(review): the three properties all carry the mangled name
    # `_snake_case`, so only the last survives at runtime; upstream they are
    # `inputs`, `outputs` and `atol_for_validation` — confirm before restoring.
    '''simple docstring'''
    __A : Union[str, Any] = version.parse("1.11" )

    @property
    def _snake_case ( self ):
        # ONNX graph inputs: pixel_values with a dynamic batch axis.
        """simple docstring"""
        return OrderedDict([("pixel_values", {0: "batch"})] )

    @property
    def _snake_case ( self ):
        # ONNX graph outputs depend on the task being exported.
        """simple docstring"""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )

    @property
    def _snake_case ( self ):
        # Absolute tolerance used when validating the exported model.
        """simple docstring"""
        return 1e-4
| 283 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def lowerCAmelCase_ ( __UpperCAmelCase: Union[str, Any] , __UpperCAmelCase: List[Any]=False ) -> Any:
UpperCamelCase__ : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
UpperCamelCase__ : str = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def lowerCAmelCase_ ( state_dict , config , base_model=False ) -> None:
    """Split each fused qkv projection in *state_dict* into separate
    query/key/value weight and bias entries, in place.

    The obfuscation removed both the real parameter names (duplicate names
    were a SyntaxError) and the assignment targets; the target keys below
    follow the standard HF ViT layout (attention.attention.{query,key,value}),
    matching the slices the body computed.
    """
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( state_dict ) -> None:
    """Drop the classification-head weights from *state_dict*, in place.

    Missing keys are ignored (``pop`` with a ``None`` default) — the original
    popped with mangled arguments that would have raised.
    """
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k, None)
def lowerCAmelCase_ ( state_dict ) -> None:
    """Drop the self-supervised projection-head weights from *state_dict*.

    The projection head is used in the self-supervised MSN pre-training; for
    downstream tasks it is not needed.  Missing keys are ignored.
    """
    ignore_keys = [
        '''module.fc.fc1.weight''',
        '''module.fc.fc1.bias''',
        '''module.fc.bn1.weight''',
        '''module.fc.bn1.bias''',
        '''module.fc.bn1.running_mean''',
        '''module.fc.bn1.running_var''',
        '''module.fc.bn1.num_batches_tracked''',
        '''module.fc.fc2.weight''',
        '''module.fc.fc2.bias''',
        '''module.fc.bn2.weight''',
        '''module.fc.bn2.bias''',
        '''module.fc.bn2.running_mean''',
        '''module.fc.bn2.running_var''',
        '''module.fc.bn2.num_batches_tracked''',
        '''module.fc.fc3.weight''',
        '''module.fc.fc3.bias''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def lowerCAmelCase_ ( dct , old , new ) -> None:
    """Move the entry under key *old* to key *new* in *dct*, in place.

    The original signature repeated one mangled parameter name three times —
    a SyntaxError; positional callers pass (state_dict, src, dest).
    """
    val = dct.pop(old)
    dct[new] = val
def lowerCAmelCase_ ( __UpperCAmelCase: Tuple , __UpperCAmelCase: Optional[Any] ) -> Optional[Any]:
    """Convert a ViT-MSN checkpoint (fetched from a URL) into a Hugging Face
    ``ViTMSNModel`` directory, verifying a slice of the hidden state against
    reference values before saving.

    NOTE(review): machine-mangled block — the signature repeats one parameter
    name (a SyntaxError) and every local is bound to ``UpperCamelCase__`` while
    later lines read the intended names (``config``, ``idalabel``, ``model``,
    ``image_processor``, ``checkpoint_url`` …).  Comments below describe the
    intended flow; the code needs de-mangling before it can run.
    """
    # Intended: config = ViTMSNConfig(); num_labels = 1000 (ImageNet-1k).
    UpperCamelCase__ : Optional[Any] = ViTMSNConfig()
    UpperCamelCase__ : int = 1000
    # Intended: download the ImageNet-1k id2label mapping from the HF Hub and
    # attach id2label / label2id to the config.
    UpperCamelCase__ : Optional[int] = '''datasets/huggingface/label-files'''
    UpperCamelCase__ : str = '''imagenet-1k-id2label.json'''
    UpperCamelCase__ : List[Any] = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase ) , '''r''' ) )
    UpperCamelCase__ : Tuple = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
    UpperCamelCase__ : List[str] = idalabel
    UpperCamelCase__ : Optional[int] = {v: k for k, v in idalabel.items()}
    # Architecture hyper-parameters are inferred from the checkpoint URL
    # (small/large patch-16, base patch-4, large patch-7 variants).
    if "s16" in checkpoint_url:
        UpperCamelCase__ : int = 384
        UpperCamelCase__ : Dict = 1536
        UpperCamelCase__ : str = 6
    elif "l16" in checkpoint_url:
        UpperCamelCase__ : int = 1024
        UpperCamelCase__ : Optional[int] = 4096
        UpperCamelCase__ : int = 24
        UpperCamelCase__ : Tuple = 16
        UpperCamelCase__ : Dict = 0.1
    elif "b4" in checkpoint_url:
        UpperCamelCase__ : str = 4
    elif "l7" in checkpoint_url:
        UpperCamelCase__ : Optional[Any] = 7
        UpperCamelCase__ : List[str] = 1024
        UpperCamelCase__ : Any = 4096
        UpperCamelCase__ : Union[str, Any] = 24
        UpperCamelCase__ : Any = 16
        UpperCamelCase__ : Any = 0.1
    # Intended: instantiate the model and pull the MSN "target_encoder"
    # weights out of the torch-hub checkpoint.
    UpperCamelCase__ : Any = ViTMSNModel(__UpperCAmelCase )
    UpperCamelCase__ : Optional[Any] = torch.hub.load_state_dict_from_url(__UpperCAmelCase , map_location='''cpu''' )['''target_encoder''']
    UpperCamelCase__ : Union[str, Any] = ViTImageProcessor(size=config.image_size )
    # Drop the self-supervised projection head, rename keys to HF format and
    # split the fused qkv projection, then load the converted weights.
    remove_projection_head(__UpperCAmelCase )
    UpperCamelCase__ : List[str] = create_rename_keys(__UpperCAmelCase , base_model=__UpperCAmelCase )
    for src, dest in rename_keys:
        rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
    read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase , base_model=__UpperCAmelCase )
    model.load_state_dict(__UpperCAmelCase )
    model.eval()
    # Sanity-check the converted model on a COCO sample image.
    UpperCamelCase__ : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    UpperCamelCase__ : Union[str, Any] = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
    # NOTE(review): the image_mean/image_std arguments below were mangled to
    # an unresolved name — presumably the ImageNet normalization constants;
    # confirm against the original conversion script.
    UpperCamelCase__ : Optional[Any] = ViTImageProcessor(
        size=config.image_size , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase )
    UpperCamelCase__ : str = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' )
    # forward pass
    torch.manual_seed(2 )
    UpperCamelCase__ : Optional[int] = model(**__UpperCAmelCase )
    UpperCamelCase__ : Any = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        UpperCamelCase__ : Tuple = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
    elif "b16" in checkpoint_url:
        UpperCamelCase__ : Optional[Any] = torch.tensor([[14.2889, -18.9045, 11.7281]] )
    elif "l16" in checkpoint_url:
        UpperCamelCase__ : Tuple = torch.tensor([[41.5028, -22.8681, 45.6475]] )
    elif "b4" in checkpoint_url:
        UpperCamelCase__ : Optional[Any] = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
    else:
        UpperCamelCase__ : List[Any] = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , __UpperCAmelCase , atol=1e-4 )
    print(f"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(__UpperCAmelCase )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
    # The mangled original bound the parser and the parsed namespace to
    # throwaway names while the following lines read ``parser``/``args``;
    # the intended bindings are restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 247 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
# Optional apex support for mixed-precision training (pre-native-AMP setups).
if is_apex_available():
    from apex import amp
# Native torch.cuda.amp autocast is available from torch >= 1.6.
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    # NOTE(review): mangled name — presumably an "is native amp available"
    # flag; the last line below rebinds the very same name to the module
    # logger, clobbering it.  Verify against the original script.
    UpperCAmelCase_ = True
    from torch.cuda.amp import autocast
# Module-level logger (rebinds the flag name above — see NOTE).
UpperCAmelCase_ = logging.getLogger(__name__)
@dataclass
class lowercase__ :
    '''Model-side CLI arguments: checkpoint path, cache directory, feature-
    extractor freezing, verbosity, and the gumbel-softmax temperature schedule
    (max/min temperature and decay) used during wav2vec2 pre-training.

    NOTE(review): machine-mangled — every field is named ``a`` (later fields
    shadow earlier ones in the dataclass) and several defaults are bound to
    the undefined mangled name ``__lowerCamelCase`` (presumably ``None`` /
    booleans in the original).  Downstream code reads attributes such as
    ``model_name_or_path`` and ``verbose_logging`` that this class no longer
    declares.  Needs de-mangling before it can run.
    '''
    # Intended: model_name_or_path (required, no default).
    a : str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    # Intended: cache_dir.
    a : Optional[str] = field(
        default=__lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    # Intended: freeze_feature_extractor.
    a : Optional[bool] = field(
        default=__lowerCamelCase , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    # Intended: verbose_logging.
    a : Optional[bool] = field(
        default=__lowerCamelCase , metadata={"help": "Whether to log verbose messages or not."} , )
    # Intended: max_gumbel_temperature.
    a : Optional[float] = field(
        default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
    # Intended: min_gumbel_temperature.
    a : Optional[float] = field(
        default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
    # Intended: gumbel_temperature_decay.
    a : Optional[float] = field(
        default=0.9_9_9_9_9_5 , metadata={"help": "Decay of gumbel temperature during training."} )
def lowerCAmelCase_(model_args, training_args) -> None:
    """Configure logging verbosity for the pre-training script.

    The mangled original repeated one parameter name twice (a SyntaxError) and
    bound the chosen level to a throwaway name; the names the body already
    reads (``model_args``, ``training_args``) are restored.

    Args:
        model_args: parsed ``ModelArguments`` (``verbose_logging`` is read).
        training_args: parsed ``TrainingArguments`` (``local_rank`` is read).
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        # Only the main process logs at INFO to avoid duplicated output in
        # distributed runs.
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class lowercase__ :
    '''Data-side CLI arguments: dataset name/config, split names, the column
    holding speech file paths, preprocessing controls, and a maximum audio
    duration filter.

    NOTE(review): machine-mangled — every field is named ``a`` (later fields
    shadow earlier ones) and several defaults are bound to the undefined
    mangled name ``__lowerCamelCase``.  Downstream code reads attributes such
    as ``dataset_name`` and ``train_split_name`` that this class no longer
    declares.  Needs de-mangling before it can run.
    '''
    # Intended: dataset_name.
    a : str = field(
        default=__lowerCamelCase , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    # Intended: dataset_config_name.
    a : Optional[str] = field(
        default=__lowerCamelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    # Intended: train_split_name.
    a : Optional[str] = field(
        default="train" , metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    # Intended: validation_split_name.
    a : Optional[str] = field(
        default="validation" , metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        } , )
    # Intended: speech_file_column.
    a : Optional[str] = field(
        default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
    # Intended: overwrite_cache.
    a : bool = field(
        default=__lowerCamelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    # Intended: validation_split_percentage.
    a : Optional[int] = field(
        default=1 , metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        } , )
    # Intended: preprocessing_num_workers.
    a : Optional[int] = field(
        default=__lowerCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
    # Intended: max_duration_in_seconds.
    a : Optional[float] = field(
        default=2_0.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class lowercase__ :
    '''Data collator for wav2vec2 pre-training: pads raw input values to a
    batch, builds an attention mask restricted to real (non-padded) frames at
    the feature-encoder output resolution, and samples the time-mask indices
    used for the contrastive objective.

    NOTE(review): machine-mangled — the five fields are all named ``a`` (the
    intended names, read by ``__call__`` below, are ``model``,
    ``feature_extractor``, ``padding``, ``max_length``,
    ``pad_to_multiple_of``), and the locals in ``__call__`` are bound to a
    throwaway name while later lines read ``batch`` / ``batch_size`` /
    ``attention_mask`` etc.  Needs de-mangling before it can run.
    '''
    # Intended: model (WavaVecaForPreTraining).
    a : WavaVecaForPreTraining
    # Intended: feature_extractor.
    a : WavaVecaFeatureExtractor
    # Intended: padding strategy.
    a : Union[bool, str] = "longest"
    # Intended: max_length.
    a : Optional[int] = None
    # Intended: pad_to_multiple_of.
    a : Optional[int] = None
    def __call__( self, __magic_name__ ) -> Dict[str, torch.Tensor]:
        """Pad a list of features into a model-ready batch (see class NOTE)."""
        # reformat list to dict and set to pytorch format
        UpperCamelCase__ : List[Any] = self.feature_extractor.pad(
            __magic_name__, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='''pt''', )
        # Length of the feature-encoder output for the padded input length.
        UpperCamelCase__ : Dict = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] )
        UpperCamelCase__ : Union[str, Any] = batch['''input_values'''].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            UpperCamelCase__ : List[str] = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to(
                torch.long )
            UpperCamelCase__ : Dict = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch['''input_values'''].device )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            UpperCamelCase__ : str = 1
            UpperCamelCase__ : Union[str, Any] = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        UpperCamelCase__ : Dict = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=__magic_name__, min_masks=2, )
        return batch
class lowercase__ ( __lowerCamelCase ):
    '''Trainer subclass for wav2vec2 pre-training: runs a custom training step
    with optional AMP/apex/deepspeed backward passes and decays the model's
    gumbel-softmax temperature after every update.

    NOTE(review): machine-mangled — the base class name is unresolved
    (presumably ``Trainer``), ``__init__`` repeats one parameter name (a
    SyntaxError), and the attribute assignments bind a throwaway name instead
    of ``self.num_update_step`` / ``self.max_gumbel_temp`` etc., which the
    training step below reads.  Needs de-mangling before it can run.
    '''
    def __init__( self, *__magic_name__, __magic_name__=1, __magic_name__=0, __magic_name__=1.0, **__magic_name__ ) -> Dict:
        """Store the gumbel temperature schedule (see class NOTE)."""
        super().__init__(*__magic_name__, **__magic_name__ )
        # Intended: self.num_update_step = 0 and the three schedule values.
        UpperCamelCase__ : Any = 0
        UpperCamelCase__ : List[Any] = max_gumbel_temp
        UpperCamelCase__ : List[str] = min_gumbel_temp
        UpperCamelCase__ : Any = gumbel_temp_decay
    def UpperCamelCase__ ( self, __magic_name__, __magic_name__ ) -> torch.Tensor:
        """Single training step: forward, loss reduction, backward, gumbel decay."""
        model.train()
        UpperCamelCase__ : str = self._prepare_inputs(__magic_name__ )
        if self.use_amp:
            with autocast():
                UpperCamelCase__ : Optional[Any] = self.compute_loss(__magic_name__, __magic_name__ )
        else:
            UpperCamelCase__ : Tuple = self.compute_loss(__magic_name__, __magic_name__ )
        # Multi-GPU / deepspeed: reduce the per-device losses.
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                UpperCamelCase__ : Any = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                UpperCamelCase__ : List[str] = loss.sum() / (inputs['''mask_time_indices''']).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']" )
        if self.args.gradient_accumulation_steps > 1:
            UpperCamelCase__ : Tuple = loss / self.args.gradient_accumulation_steps
        # Pick the backward implementation matching the configured precision
        # backend (grad scaler, apex, deepspeed, or plain autograd).
        if self.use_amp:
            self.scaler.scale(__magic_name__ ).backward()
        elif self.use_apex:
            with amp.scale_loss(__magic_name__, self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(__magic_name__ )
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp ) )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp ) )
        return loss.detach()
def lowerCAmelCase_ ( ) -> str:
    """Entry point: parse args, load/prepare the dataset, build the model and
    collator, and run wav2vec2 pre-training.

    NOTE(review): machine-mangled — locals are bound to throwaway names while
    later lines read the intended names (``parser``, ``datasets``,
    ``feature_extractor``, ``vectorized_datasets``, ``config``, ``trainer``),
    and it references classes (``ModelArguments``,
    ``DataCollatorForWavaVecaPretraining``, ``WavaVecaPreTrainer``) whose
    definitions above were renamed by the mangling.  Needs de-mangling before
    it can run.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    UpperCamelCase__ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : str = parser.parse_args_into_dataclasses()
    configure_logger(__UpperCAmelCase , __UpperCAmelCase )
    # Downloading and loading a dataset from the hub.
    UpperCamelCase__ : str = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        UpperCamelCase__ : Any = DatasetDict()
        UpperCamelCase__ : List[Any] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
        UpperCamelCase__ : Any = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain"
        UpperCamelCase__ : int = DatasetDict()
        UpperCamelCase__ : str = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , )
        UpperCamelCase__ : List[Any] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    UpperCamelCase__ : str = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=__UpperCAmelCase )
    def prepare_dataset(__UpperCAmelCase: Union[str, Any] ):
        # check that all files have the correct sampling rate
        UpperCamelCase__ ,UpperCamelCase__ : List[str] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    UpperCamelCase__ : Any = datasets.map(
        __UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names )
    # filter audio files that are too long
    UpperCamelCase__ : Tuple = vectorized_datasets.filter(
        lambda __UpperCAmelCase : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(__UpperCAmelCase: Optional[int] ):
        return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    UpperCamelCase__ : Any = vectorized_datasets.map(
        __UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    UpperCamelCase__ : int = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            '''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
            ''' ``config.feat_extract_norm=\'layer\'''' )
    # Build model, collator and trainer, then launch training.
    UpperCamelCase__ : Optional[int] = WavaVecaForPreTraining(__UpperCAmelCase )
    UpperCamelCase__ : List[str] = DataCollatorForWavaVecaPretraining(model=__UpperCAmelCase , feature_extractor=__UpperCAmelCase )
    UpperCamelCase__ : List[Any] = WavaVecaPreTrainer(
        model=__UpperCAmelCase , data_collator=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=__UpperCAmelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()
# Standard script entry point: run pre-training when executed directly.
if __name__ == "__main__":
    main()
| 247 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__snake_case : Union[str, Any] = logging.get_logger(__name__)
class A__(OwlViTImageProcessor):
    """Deprecated alias for ``OwlViTImageProcessor``.

    Kept only for backward compatibility; instantiating it emits a
    ``FutureWarning`` and otherwise behaves exactly like the image processor.
    """

    def __init__(self, *args, **kwargs) -> None:
        # The mangled original passed the positional-args tuple where the
        # warning category belongs (a TypeError at runtime) and inherited from
        # an unresolved mangled name; OwlViTImageProcessor — imported above
        # precisely for this shim — is the intended base, and FutureWarning is
        # the library's standard deprecation category.
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _lowercase ( __snake_case ) -> List[str]:
if isinstance(__snake_case ,collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class A__ :
    '''Mixin exercising ``FlaxVisionTextDualEncoderModel``: construction from
    configs, ``from_vision_text_pretrained`` loading, save/load round-trips,
    attention outputs, and PyTorch<->Flax cross-framework equivalence.

    NOTE(review): machine-mangled — every method is named
    ``_SCREAMING_SNAKE_CASE`` (later defs shadow earlier ones in the class
    dict), several signatures repeat a parameter name (SyntaxErrors), and
    locals are bound to ``__lowerCAmelCase`` while later statements read the
    intended names (``model``, ``output``, ``vision_model`` …).  The intended
    method names are recoverable from the ``self.<name>`` call sites below
    (e.g. ``assert_almost_equals``, ``check_save_load``,
    ``prepare_config_and_inputs``).  Needs de-mangling before it can run.
    '''
    # The next three no-op defs appear to be the hooks the concrete subclasses
    # below provide: get_vision_text_model, prepare_config_and_inputs, and
    # get_pretrained_model_and_inputs — TODO confirm.
    def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[Any]) -> str:
        """simple docstring"""
        pass
    def _SCREAMING_SNAKE_CASE ( self: str) -> int:
        """simple docstring"""
        pass
    def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Tuple:
        """simple docstring"""
        pass
    # Mangled ``assert_almost_equals(a, b, tol)``: asserts the max absolute
    # difference between two arrays is within the tolerance.
    def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: float) -> List[Any]:
        """simple docstring"""
        __lowerCAmelCase : str = np.abs((a - b)).max()
        self.assertLessEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , F"""Difference between torch and flax is {diff} (>= {tol}).""")
    # Mangled ``check_model_from_pretrained_configs``: builds a dual encoder
    # from explicit configs and checks the embedding output shapes.
    def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any=None , **_SCREAMING_SNAKE_CASE: Tuple) -> List[str]:
        """simple docstring"""
        __lowerCAmelCase : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : List[str] = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Optional[int] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim))
    # Mangled ``check_vision_text_dual_encoder_from_pretrained``: assembles the
    # dual encoder from separate vision/text submodels and checks shapes.
    def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Any]=None , **_SCREAMING_SNAKE_CASE: List[str]) -> Dict:
        """simple docstring"""
        __lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
        __lowerCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Union[str, Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim))
    # Mangled ``check_save_load``: saves the model to a temp dir, reloads it,
    # and asserts the reloaded outputs match the originals within 1e-3.
    def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any]=None , **_SCREAMING_SNAKE_CASE: Tuple) -> str:
        """simple docstring"""
        __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Optional[Any] = {"vision_model": vision_model, "text_model": text_model}
        __lowerCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Optional[Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Any = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(_SCREAMING_SNAKE_CASE)
            __lowerCAmelCase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE)
            __lowerCAmelCase : int = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
            __lowerCAmelCase : Union[str, Any] = after_output[0]
            __lowerCAmelCase : Any = np.amax(np.abs(out_a - out_a))
            self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3)
    # Mangled ``check_vision_text_output_attention``: runs with
    # output_attentions=True and checks the attention tensor shapes for both
    # towers (ViT seq_len = num_patches + 1 for the [CLS] token).
    def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Any=None , **_SCREAMING_SNAKE_CASE: List[str]) -> Optional[int]:
        """simple docstring"""
        __lowerCAmelCase , __lowerCAmelCase : Any = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
        __lowerCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Optional[Any] = model(
            input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Optional[Any] = output.vision_model_output.attentions
        self.assertEqual(len(_SCREAMING_SNAKE_CASE) , vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        __lowerCAmelCase : List[str] = to_atuple(vision_model.config.image_size)
        __lowerCAmelCase : Any = to_atuple(vision_model.config.patch_size)
        __lowerCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        __lowerCAmelCase : Tuple = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
        __lowerCAmelCase : Union[str, Any] = output.text_model_output.attentions
        self.assertEqual(len(_SCREAMING_SNAKE_CASE) , text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    # Mangled ``check_pt_flax_equivalence``: compares a PyTorch and a Flax
    # model's outputs directly and after cross-framework save/load in both
    # directions, within a 4e-2 tolerance.
    def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: int) -> str:
        """simple docstring"""
        pt_model.to(_SCREAMING_SNAKE_CASE)
        pt_model.eval()
        # prepare inputs
        __lowerCAmelCase : Union[str, Any] = inputs_dict
        __lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
        with torch.no_grad():
            __lowerCAmelCase : Any = pt_model(**_SCREAMING_SNAKE_CASE).to_tuple()
        __lowerCAmelCase : List[Any] = fx_model(**_SCREAMING_SNAKE_CASE).to_tuple()
        self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]):
            self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2)
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(_SCREAMING_SNAKE_CASE)
            __lowerCAmelCase : str = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : int = fx_model_loaded(**_SCREAMING_SNAKE_CASE).to_tuple()
        self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]):
            self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2)
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(_SCREAMING_SNAKE_CASE)
            __lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE)
        pt_model_loaded.to(_SCREAMING_SNAKE_CASE)
        pt_model_loaded.eval()
        with torch.no_grad():
            __lowerCAmelCase : Optional[Any] = pt_model_loaded(**_SCREAMING_SNAKE_CASE).to_tuple()
        self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]):
            self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output_loaded.numpy() , 4e-2)
    # Mangled ``check_equivalence_pt_to_flax``: converts PT weights to Flax
    # params and delegates to the equivalence check above.
    def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Dict) -> Optional[Any]:
        """simple docstring"""
        __lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Dict = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : str = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : str = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Optional[Any] = fx_state
        self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
    # Mangled ``check_equivalence_flax_to_pt``: loads Flax weights into the PT
    # model and delegates to the equivalence check above.
    def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict) -> str:
        """simple docstring"""
        __lowerCAmelCase : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Dict = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Tuple = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : int = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params)
        self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
    # The remaining defs are the mangled test_* entry points driving the
    # check_* helpers above with prepare_config_and_inputs().
    def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> str:
        """simple docstring"""
        __lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**_SCREAMING_SNAKE_CASE)
    def _SCREAMING_SNAKE_CASE ( self: Dict) -> int:
        """simple docstring"""
        __lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**_SCREAMING_SNAKE_CASE)
    def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[Any]:
        """simple docstring"""
        __lowerCAmelCase : Dict = self.prepare_config_and_inputs()
        self.check_save_load(**_SCREAMING_SNAKE_CASE)
    def _SCREAMING_SNAKE_CASE ( self: int) -> Dict:
        """simple docstring"""
        __lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**_SCREAMING_SNAKE_CASE)
    @is_pt_flax_cross_test
    def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any:
        """simple docstring"""
        __lowerCAmelCase : Dict = self.prepare_config_and_inputs()
        __lowerCAmelCase : List[Any] = config_inputs_dict.pop("vision_config")
        __lowerCAmelCase : str = config_inputs_dict.pop("text_config")
        __lowerCAmelCase : Union[str, Any] = config_inputs_dict
        self.check_equivalence_pt_to_flax(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
        self.check_equivalence_flax_to_pt(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
    @slow
    def _SCREAMING_SNAKE_CASE ( self: str) -> Dict:
        """simple docstring"""
        __lowerCAmelCase , __lowerCAmelCase : Dict = self.get_pretrained_model_and_inputs()
        __lowerCAmelCase : Union[str, Any] = model_a(**_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : List[str] = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(_SCREAMING_SNAKE_CASE)
            __lowerCAmelCase : List[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE)
            __lowerCAmelCase : Union[str, Any] = model_a(**_SCREAMING_SNAKE_CASE)
            __lowerCAmelCase : List[str] = after_outputs[0]
            __lowerCAmelCase : List[Any] = np.amax(np.abs(out_a - out_a))
            self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-5)
@require_flax
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''Concrete test case pairing a tiny Flax ViT vision tower with a tiny
    Flax BERT text tower.

    NOTE(review): machine-mangled — the mixin base class name is unresolved,
    all methods are named ``_SCREAMING_SNAKE_CASE`` (shadowing each other),
    keyword values are bound to unresolved names, and locals are bound to
    ``__lowerCAmelCase`` while later lines read the intended names.  Needs
    de-mangling before it can run.
    '''
    # Mangled ``get_pretrained_model_and_inputs``: tiny ViT+BERT dual encoder
    # plus random pixel values / input ids / attention mask.
    def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[Any]:
        """simple docstring"""
        __lowerCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
        __lowerCAmelCase : Union[str, Any] = 13
        __lowerCAmelCase : Optional[int] = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        __lowerCAmelCase : List[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
        __lowerCAmelCase : List[Any] = random_attention_mask([batch_size, 4])
        __lowerCAmelCase : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    # Mangled ``get_vision_text_model``: builds the two towers from configs.
    def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Optional[int]:
        """simple docstring"""
        __lowerCAmelCase : List[str] = FlaxViTModel(_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Union[str, Any] = FlaxBertModel(_SCREAMING_SNAKE_CASE)
        return vision_model, text_model
    # Mangled ``prepare_config_and_inputs``: delegates to the ViT and BERT
    # model testers and repackages their configs/inputs.
    def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> int:
        """simple docstring"""
        __lowerCAmelCase : List[Any] = FlaxViTModelTester(self)
        __lowerCAmelCase : Optional[Any] = FlaxBertModelTester(self)
        __lowerCAmelCase : int = vit_model_tester.prepare_config_and_inputs()
        __lowerCAmelCase : List[str] = bert_model_tester.prepare_config_and_inputs()
        __lowerCAmelCase , __lowerCAmelCase : Tuple = vision_config_and_inputs
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''Concrete test case pairing a tiny Flax CLIP vision tower with a tiny
    Flax BERT text tower (vision/text weights loaded from PyTorch, hence
    ``@require_torch``).

    NOTE(review): machine-mangled — same issues as the ViT+BERT case above
    (unresolved base class, shadowed method names, unresolved keyword values,
    throwaway local bindings).  Needs de-mangling before it can run.
    '''
    # Mangled ``get_pretrained_model_and_inputs``: tiny CLIP+BERT dual encoder
    # plus random pixel values / input ids / attention mask.
    def _SCREAMING_SNAKE_CASE ( self: Dict) -> List[str]:
        """simple docstring"""
        __lowerCAmelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
        __lowerCAmelCase : Optional[int] = 13
        __lowerCAmelCase : List[str] = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        __lowerCAmelCase : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
        __lowerCAmelCase : str = random_attention_mask([batch_size, 4])
        __lowerCAmelCase : Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    # Mangled ``get_vision_text_model``: builds the two towers from configs.
    def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
        """simple docstring"""
        __lowerCAmelCase : int = FlaxCLIPVisionModel(_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : List[str] = FlaxBertModel(_SCREAMING_SNAKE_CASE)
        return vision_model, text_model
    # Mangled ``prepare_config_and_inputs``: delegates to the CLIP and BERT
    # model testers and repackages their configs/inputs.
    def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Optional[Any]:
        """simple docstring"""
        __lowerCAmelCase : List[Any] = FlaxCLIPVisionModelTester(self)
        __lowerCAmelCase : str = FlaxBertModelTester(self)
        __lowerCAmelCase : Optional[Any] = clip_model_tester.prepare_config_and_inputs()
        __lowerCAmelCase : Dict = bert_model_tester.prepare_config_and_inputs()
        __lowerCAmelCase , __lowerCAmelCase : Any = vision_config_and_inputs
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[Any] = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class A__(unittest.TestCase):
    """Slow integration test against the public clip-italian checkpoint (Flax)."""

    @slow
    def _SCREAMING_SNAKE_CASE(self: List[str]) -> List[str]:
        """Check logits_per_image/logits_per_text shapes and values on a COCO image."""
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"],
            images=image,
            padding=True,  # was an undefined name; True matches the TF/PT versions of this test
            return_tensors="np",
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
        expected_logits = np.array([[1.228_4727, 0.310_4122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
def _UpperCAmelCase (UpperCamelCase_ : bytes ):
'''simple docstring'''
return "".join([hex(UpperCamelCase_ )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase_ )] )
def _UpperCAmelCase (UpperCamelCase_ : str ):
'''simple docstring'''
if (len(UpperCamelCase_ ) % 2) != 0:
raise ValueError(
"""Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase_ ) <= set("""0123456789ABCDEF""" ):
raise ValueError(
"""Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase_ ) , 2 ) )
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 357 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def _UpperCAmelCase (UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = k_size // 2
_lowerCAmelCase , _lowerCAmelCase : List[str] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
_lowerCAmelCase : Optional[int] = 1 / (2 * pi * sigma) * exp(-(square(UpperCamelCase_ ) + square(UpperCamelCase_ )) / (2 * square(UpperCamelCase_ )) )
return g
def _UpperCAmelCase (UpperCamelCase_ : str , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : str = image.shape[0], image.shape[1]
# dst image height and width
_lowerCAmelCase : Optional[Any] = height - k_size + 1
_lowerCAmelCase : Tuple = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
_lowerCAmelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
_lowerCAmelCase : int = 0
for i, j in product(range(UpperCamelCase_ ) , range(UpperCamelCase_ ) ):
_lowerCAmelCase : Optional[int] = ravel(image[i : i + k_size, j : j + k_size] )
_lowerCAmelCase : Dict = window
row += 1
# turn the kernel into shape(k*k, 1)
_lowerCAmelCase : Optional[Any] = gen_gaussian_kernel(UpperCamelCase_ , UpperCamelCase_ )
_lowerCAmelCase : Dict = ravel(UpperCamelCase_ )
# reshape and get the dst image
_lowerCAmelCase : List[str] = dot(UpperCamelCase_ , UpperCamelCase_ ).reshape(UpperCamelCase_ , UpperCamelCase_ ).astype(UpperCamelCase_ )
return dst
if __name__ == "__main__":
    # Demo: blur a sample image with two kernel sizes and display the results.
    # NOTE(review): `imread`/`imshow`/`waitKey`/`cvtColor` come from the
    # misspelled `cva` import above (presumably cv2), and `gaussian_filter`,
    # `img`, `gray` and `gaussianaxa` do not match the names actually bound in
    # this module — this demo cannot run as written; identifiers were mangled.
    # read original image
    _lowerCamelCase : int = imread(R"../image_data/lena.jpg")
    # turn image in gray scale value
    _lowerCamelCase : List[Any] = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    _lowerCamelCase : int = gaussian_filter(gray, 3, sigma=1)
    _lowerCamelCase : List[str] = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("gaussian filter with 3x3 mask", gaussianaxa)
    imshow("gaussian filter with 5x5 mask", gaussianaxa)
    waitKey()
| 159 | 0 |
'''simple docstring'''
def __UpperCamelCase ( UpperCAmelCase = "The quick brown fox jumps over the lazy dog" , ):
lowercase__ : Optional[int] = set()
# Replace all the whitespace in our sentence
lowercase__ : Dict = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(UpperCAmelCase ) == 26
def __UpperCamelCase ( UpperCAmelCase = "The quick brown fox jumps over the lazy dog" , ):
lowercase__ : str = [False] * 26
for char in input_str:
if char.islower():
lowercase__ : List[str] = True
elif char.isupper():
lowercase__ : str = True
return all(UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase = "The quick brown fox jumps over the lazy dog" , ):
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def __UpperCamelCase():
    """Time the three pangram implementations against each other with timeit.

    NOTE(review): the setup string imports `is_pangram` etc. from __main__,
    but those functions are all named `__UpperCamelCase` in this module, and
    `UpperCAmelCase` (the setup argument) is not defined in this scope — this
    helper raises NameError as written; identifiers were mangled.
    """
    from timeit import timeit

    lowercase__ : Dict = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
    print(timeit('''is_pangram()''', setup=UpperCAmelCase))
    print(timeit('''is_pangram_faster()''', setup=UpperCAmelCase))
    print(timeit('''is_pangram_fastest()''', setup=UpperCAmelCase))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    # Run the doctests, then the timing benchmark.
    import doctest

    doctest.testmod()
    # NOTE(review): no function named `benchmark` exists in this module (the
    # timing helper above was renamed to `__UpperCamelCase`), so this call
    # raises NameError as written.
    benchmark()
| 198 | '''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__a: List[str] = logging.get_logger(__name__)
class UpperCAmelCase(BaseImageProcessor):
    """BLIP-style image processor: convert-to-RGB, resize, rescale, normalize.

    NOTE(review): the original was syntactically invalid — every signature
    repeated the mangled parameter name `__lowerCAmelCase`, the base class was
    the undefined name `a__`, and all five methods shared one mangled name even
    though `preprocess` calls `self.resize`/`self.rescale`/`self.normalize`.
    The conventional names are restored here; `SCREAMING_SNAKE_CASE` (a mangled
    class attribute, presumably `model_input_names`) is kept as-is.
    """

    SCREAMING_SNAKE_CASE = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Default target size is a 384x384 square.
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        """Resize `image` to exactly (size["height"], size["width"])."""
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""")
        output_size = (size["height"], size["width"])
        # `resize` here is the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        """Normalize `image` channel-wise with the given mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        do_convert_rgb=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the full pipeline over one image or a list of images.

        Per-call arguments override the defaults captured in __init__.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        # Fixed precedence bug: the original `do_resize and size is None or
        # resample is None` raised even when do_resize was False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
| 198 | 1 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def A_ ( A__ , A__ , A__ ) -> Union[str, Any]:
    """Convert a TF2 "token dropping" BERT checkpoint to a PyTorch BertForMaskedLM.

    NOTE(review): this definition repeats the parameter name `A__` three times
    (originally tf_checkpoint_path, config_path, pytorch_dump_path), which is a
    SyntaxError, and every assignment below binds the same throwaway name
    `a__`, so the loaded arrays are never actually attached to the model.
    The original identifiers must be restored before this can run.
    """
    def get_masked_lm_array(A__ ):
        # Load one variable from the masked-LM head scope; TF dense kernels are
        # stored (in, out) so "kernel" arrays are transposed for torch.
        a__ : Union[str, Any] = F'masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'
        a__ : Optional[Any] = tf.train.load_variable(A__ , A__ )
        if "kernel" in name:
            a__ : Tuple = array.transpose()
        return torch.from_numpy(A__ )
    def get_encoder_array(A__ ):
        # Load one variable from the encoder (embedding-level) scope.
        a__ : str = F'encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'
        a__ : Any = tf.train.load_variable(A__ , A__ )
        if "kernel" in name:
            a__ : List[Any] = array.transpose()
        return torch.from_numpy(A__ )
    def get_encoder_layer_array(A__ , A__ ):
        # Load one variable from a specific transformer layer.
        a__ : Dict = F'encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'
        a__ : List[Any] = tf.train.load_variable(A__ , A__ )
        if "kernel" in name:
            a__ : List[str] = array.transpose()
        return torch.from_numpy(A__ )
    def get_encoder_attention_layer_array(A__ , A__ , A__ ):
        # Attention variables additionally need reshaping to the torch shape.
        a__ : int = F'encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'
        a__ : Tuple = tf.train.load_variable(A__ , A__ )
        a__ : Union[str, Any] = array.reshape(A__ )
        if "kernel" in name:
            a__ : str = array.transpose()
        return torch.from_numpy(A__ )
    print(F'Loading model based on config from {config_path}...' )
    a__ : Tuple = BertConfig.from_json_file(A__ )
    a__ : Tuple = BertForMaskedLM(A__ )
    # Layers: copy each transformer layer's weights into the torch model.
    for layer_index in range(0 , config.num_hidden_layers ):
        a__ : BertLayer = model.bert.encoder.layer[layer_index]
        # Self-attention
        a__ : BertSelfAttention = layer.attention.self
        a__ : Tuple = get_encoder_attention_layer_array(
            A__ , '_query_dense/kernel' , self_attn.query.weight.data.shape )
        a__ : Tuple = get_encoder_attention_layer_array(
            A__ , '_query_dense/bias' , self_attn.query.bias.data.shape )
        a__ : Any = get_encoder_attention_layer_array(
            A__ , '_key_dense/kernel' , self_attn.key.weight.data.shape )
        a__ : Union[str, Any] = get_encoder_attention_layer_array(
            A__ , '_key_dense/bias' , self_attn.key.bias.data.shape )
        a__ : int = get_encoder_attention_layer_array(
            A__ , '_value_dense/kernel' , self_attn.value.weight.data.shape )
        a__ : Dict = get_encoder_attention_layer_array(
            A__ , '_value_dense/bias' , self_attn.value.bias.data.shape )
        # Self-attention Output
        a__ : BertSelfOutput = layer.attention.output
        a__ : str = get_encoder_attention_layer_array(
            A__ , '_output_dense/kernel' , self_output.dense.weight.data.shape )
        a__ : List[str] = get_encoder_attention_layer_array(
            A__ , '_output_dense/bias' , self_output.dense.bias.data.shape )
        a__ : List[str] = get_encoder_layer_array(A__ , '_attention_layer_norm/gamma' )
        a__ : Dict = get_encoder_layer_array(A__ , '_attention_layer_norm/beta' )
        # Intermediate
        a__ : BertIntermediate = layer.intermediate
        a__ : Dict = get_encoder_layer_array(A__ , '_intermediate_dense/kernel' )
        a__ : Dict = get_encoder_layer_array(A__ , '_intermediate_dense/bias' )
        # Output
        a__ : BertOutput = layer.output
        a__ : List[str] = get_encoder_layer_array(A__ , '_output_dense/kernel' )
        a__ : Optional[Any] = get_encoder_layer_array(A__ , '_output_dense/bias' )
        a__ : Optional[Any] = get_encoder_layer_array(A__ , '_output_layer_norm/gamma' )
        a__ : List[Any] = get_encoder_layer_array(A__ , '_output_layer_norm/beta' )
    # Embeddings
    a__ : str = get_encoder_array('_position_embedding_layer/embeddings' )
    a__ : Union[str, Any] = get_encoder_array('_type_embedding_layer/embeddings' )
    a__ : Dict = get_encoder_array('_embedding_norm_layer/gamma' )
    a__ : Any = get_encoder_array('_embedding_norm_layer/beta' )
    # LM Head
    a__ : Any = model.cls.predictions.transform
    a__ : Optional[int] = get_masked_lm_array('dense/kernel' )
    a__ : List[str] = get_masked_lm_array('dense/bias' )
    a__ : Union[str, Any] = get_masked_lm_array('layer_norm/gamma' )
    a__ : Optional[int] = get_masked_lm_array('layer_norm/beta' )
    a__ : Dict = get_masked_lm_array('embedding_table' )
    # Pooling
    a__ : Optional[Any] = BertPooler(config=A__ )
    a__ : BertPooler = get_encoder_array('_pooler_layer/kernel' )
    a__ : BertPooler = get_encoder_array('_pooler_layer/bias' )
    # Export final model
    model.save_pretrained(A__ )
    # Integration test - should load without any errors ;)
    a__ : Any = BertForMaskedLM.from_pretrained(A__ )
    print(new_model.eval() )
    print('Model conversion was done sucessfully!' )
if __name__ == "__main__":
    # Command-line entry point for the TF -> PyTorch checkpoint conversion.
    # Fixes two NameErrors in the original: the parser/args bindings were
    # mangled to throwaway names, and the final call targeted the nonexistent
    # `convert_checkpoint_to_pytorch` (this file defines the converter as `A_`).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
    )
    parser.add_argument(
        """--bert_config_file""",
        type=str,
        required=True,
        help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""",
        type=str,
        required=True,
        help="""Path to the output PyTorch model.""",
    )
    args = parser.parse_args()
    A_(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 354 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def A_ ( A__ ) -> Dict:
if isinstance(A__ , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class A__:
    """Shared check/test helpers for TF vision-text dual-encoder model tests.

    NOTE(review): every multi-argument signature below repeats the parameter
    name `lowercase`, which is a SyntaxError (duplicate argument), and all
    methods share the mangled name `__lowercase`, so later definitions shadow
    earlier ones. The original identifiers (get_vision_text_model,
    prepare_config_and_inputs, check_*, test_*) must be restored to run this.
    """

    def __lowercase(self, lowercase, lowercase) -> Union[str, Any]:
        """Hook: return (vision_model, text_model) built from the two configs."""
        pass

    def __lowercase(self) -> Dict:
        """Hook: return the joint config/inputs dict."""
        pass

    def __lowercase(self) -> Dict:
        """Hook: return a pretrained model plus ready-made inputs."""
        pass

    def __lowercase(self, lowercase, lowercase, lowercase, lowercase, lowercase=None, **lowercase) -> List[Any]:
        """Build the dual encoder from the two sub-configs and check embed shapes."""
        a__ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase, lowercase)
        a__ : Any = TFVisionTextDualEncoderModel(lowercase)
        a__ : Dict = model(input_ids=lowercase, pixel_values=lowercase, attention_mask=lowercase)
        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], config.projection_dim))

    def __lowercase(self, lowercase, lowercase, lowercase, lowercase, lowercase=None, **lowercase) -> List[Any]:
        """Build the dual encoder from sub-models and check embed shapes."""
        a__ , a__ : List[Any] = self.get_vision_text_model(lowercase, lowercase)
        a__ : List[str] = TFVisionTextDualEncoderModel(vision_model=lowercase, text_model=lowercase)
        a__ : Dict = model(input_ids=lowercase, pixel_values=lowercase, attention_mask=lowercase)
        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim))

    def __lowercase(self, lowercase, lowercase, lowercase, lowercase, lowercase=None, **lowercase) -> Tuple:
        """Build the dual encoder via from_vision_text_pretrained and check shapes."""
        a__ , a__ : Any = self.get_vision_text_model(lowercase, lowercase)
        a__ : Tuple = {'vision_model': vision_model, 'text_model': text_model}
        a__ : Union[str, Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase)
        a__ : Any = model(input_ids=lowercase, pixel_values=lowercase, attention_mask=lowercase)
        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim))

    def __lowercase(self, lowercase, lowercase, lowercase, lowercase, lowercase=None, **lowercase) -> Optional[Any]:
        """Round-trip save_pretrained/from_pretrained and compare outputs."""
        a__ , a__ : int = self.get_vision_text_model(lowercase, lowercase)
        a__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=lowercase, text_model=lowercase)
        a__ : Optional[Any] = model(input_ids=lowercase, pixel_values=lowercase, attention_mask=lowercase)
        a__ : int = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(lowercase)
            a__ : str = TFVisionTextDualEncoderModel.from_pretrained(lowercase)
            a__ : List[str] = model(input_ids=lowercase, pixel_values=lowercase, attention_mask=lowercase)
            a__ : str = after_output[0].numpy()
            # Outputs before and after the round trip must agree to 1e-5.
            a__ : str = np.amax(np.abs(out_a - out_a))
            self.assertLessEqual(lowercase, 1e-5)

    def __lowercase(self, lowercase, lowercase, lowercase, lowercase, lowercase=None, **lowercase) -> Optional[int]:
        """Check attention shapes for both towers when output_attentions is set."""
        a__ , a__ : Optional[Any] = self.get_vision_text_model(lowercase, lowercase)
        a__ : Dict = TFVisionTextDualEncoderModel(vision_model=lowercase, text_model=lowercase)
        a__ : Optional[int] = model(
            input_ids=lowercase, pixel_values=lowercase, attention_mask=lowercase, output_attentions=lowercase)
        a__ : List[str] = output.vision_model_output.attentions
        self.assertEqual(len(lowercase), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        a__ : Optional[Any] = to_atuple(vision_model.config.image_size)
        a__ : Dict = to_atuple(vision_model.config.patch_size)
        a__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        a__ : List[str] = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        a__ : Union[str, Any] = output.text_model_output.attentions
        self.assertEqual(len(lowercase), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )

    def __lowercase(self, lowercase, lowercase, lowercase) -> Any:
        """Assert the max absolute difference between two arrays is within tol."""
        a__ : str = np.abs((a - b)).max()
        self.assertLessEqual(lowercase, lowercase, F'Difference between torch and flax is {diff} (>= {tol}).')

    def __lowercase(self) -> Optional[Any]:
        """Run the dual-encoder model check on fresh config/inputs."""
        a__ : List[Any] = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**lowercase)

    def __lowercase(self) -> Dict:
        """Run the from-configs check on fresh config/inputs."""
        a__ : List[Any] = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**lowercase)

    def __lowercase(self) -> Tuple:
        """Run the from-pretrained-submodels check on fresh config/inputs."""
        a__ : Any = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**lowercase)

    def __lowercase(self) -> Optional[Any]:
        """Run the save/load round-trip check on fresh config/inputs."""
        a__ : int = self.prepare_config_and_inputs()
        self.check_save_load(**lowercase)

    def __lowercase(self) -> Optional[int]:
        """Run the attention-shape check on fresh config/inputs."""
        a__ : List[str] = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**lowercase)

    @slow
    def __lowercase(self) -> Union[str, Any]:
        """Round-trip a pretrained model through disk and compare outputs."""
        a__ , a__ : Union[str, Any] = self.get_pretrained_model_and_inputs()
        a__ : Optional[int] = model_a(**lowercase)
        a__ : int = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(lowercase)
            a__ : Union[str, Any] = TFVisionTextDualEncoderModel.from_pretrained(lowercase)
            a__ : int = model_a(**lowercase)
            a__ : str = after_outputs[0].numpy()
            a__ : List[Any] = np.amax(np.abs(out_a - out_a))
            self.assertLessEqual(lowercase, 1e-5)
@require_tf
class A__( __UpperCAmelCase, unittest.TestCase ):
    """ViT-vision + BERT-text TF dual-encoder test case.

    NOTE(review): the methods share the mangled name `__lowercase` (later
    definitions shadow earlier ones) and the two-argument signature repeats
    the parameter name `lowercase` (SyntaxError); original names were lost.
    """

    def __lowercase(self) -> List[Any]:
        """Load a tiny pretrained ViT/BERT dual encoder and random inputs."""
        a__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit', 'hf-internal-testing/tiny-random-bert')
        a__ : str = 13
        a__ : Optional[int] = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        a__ : Tuple = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        a__ : Optional[int] = random_attention_mask([batch_size, 4])
        a__ : str = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def __lowercase(self, lowercase, lowercase) -> List[Any]:
        """Instantiate the TF vision/text sub-models from their configs."""
        a__ : Optional[Any] = TFViTModel(lowercase, name='vision_model')
        a__ : Tuple = TFBertModel(lowercase, name='text_model')
        return vision_model, text_model

    def __lowercase(self) -> Any:
        """Build the joint config/inputs dict from ViT and BERT sub-testers."""
        a__ : Tuple = TFViTModelTester(self)
        a__ : int = TFBertModelTester(self)
        a__ : Optional[Any] = vit_model_tester.prepare_config_and_inputs()
        a__ : Any = bert_model_tester.prepare_config_and_inputs()
        a__ , a__ , a__ : Optional[int] = vision_config_and_inputs
        # Unpack the 7-tuple returned by the BERT tester.
        (
            (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) ,
        ) : Any = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class A__( __UpperCAmelCase, unittest.TestCase ):
    """DeiT-vision + RoBERTa-text TF dual-encoder test case.

    NOTE(review): methods share the mangled name `__lowercase` and the
    multi-argument signature repeats the parameter name `lowercase`
    (SyntaxError); original identifiers were lost in an automated rename.
    """

    def __lowercase(self) -> List[str]:
        """Load a tiny pretrained DeiT/RoBERTa dual encoder and random inputs."""
        a__ : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-deit-tf', 'hf-internal-testing/tiny-random-roberta')
        a__ : Union[str, Any] = 13
        a__ : Dict = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        a__ : Optional[Any] = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        a__ : Any = random_attention_mask([batch_size, 4])
        a__ : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def __lowercase(self, lowercase, lowercase, lowercase, lowercase, lowercase=None, **lowercase) -> Optional[Any]:
        """Attention-shape check; overrides the mixin because DeiT adds a distillation token."""
        a__ , a__ : List[Any] = self.get_vision_text_model(lowercase, lowercase)
        a__ : Any = TFVisionTextDualEncoderModel(vision_model=lowercase, text_model=lowercase)
        a__ : int = model(
            input_ids=lowercase, pixel_values=lowercase, attention_mask=lowercase, output_attentions=lowercase)
        a__ : Optional[Any] = output.vision_model_output.attentions
        self.assertEqual(len(lowercase), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        a__ : Optional[int] = to_atuple(vision_model.config.image_size)
        a__ : str = to_atuple(vision_model.config.patch_size)
        a__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        a__ : List[str] = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        a__ : List[Any] = output.text_model_output.attentions
        self.assertEqual(len(lowercase), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )

    def __lowercase(self, lowercase, lowercase) -> Optional[Any]:
        """Instantiate the TF vision/text sub-models from their configs."""
        a__ : List[str] = TFDeiTModel(lowercase, name='vision_model')
        a__ : Optional[int] = TFRobertaModel(lowercase, name='text_model')
        return vision_model, text_model

    def __lowercase(self) -> Tuple:
        """Build the joint config/inputs dict from DeiT and RoBERTa sub-testers."""
        a__ : Optional[int] = TFDeiTModelTester(self)
        a__ : str = TFRobertaModelTester(self)
        a__ : str = vit_model_tester.prepare_config_and_inputs()
        a__ : Dict = bert_model_tester.prepare_config_and_inputs()
        a__ , a__ , a__ : Any = vision_config_and_inputs
        # Unpack the 7-tuple returned by the RoBERTa tester.
        (
            (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) ,
        ) : Tuple = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class A__( __UpperCAmelCase, unittest.TestCase ):
    """CLIP-vision + BERT-text TF dual-encoder test case.

    NOTE(review): methods share the mangled name `__lowercase` and the
    two-argument signature repeats the parameter name `lowercase`
    (SyntaxError); original identifiers were lost in an automated rename.
    """

    def __lowercase(self) -> Any:
        """Load a tiny pretrained CLIP/BERT dual encoder and random inputs."""
        a__ : Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-clip-tf', 'hf-internal-testing/tiny-random-bert')
        a__ : Optional[int] = 13
        a__ : Optional[Any] = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        a__ : int = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        a__ : str = random_attention_mask([batch_size, 4])
        a__ : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def __lowercase(self, lowercase, lowercase) -> Optional[Any]:
        """Instantiate the TF vision/text sub-models from their configs."""
        a__ : str = TFCLIPVisionModel(lowercase, name='vision_model')
        a__ : str = TFBertModel(lowercase, name='text_model')
        return vision_model, text_model

    def __lowercase(self) -> List[Any]:
        """Build the joint config/inputs dict from CLIP and BERT sub-testers."""
        a__ : List[str] = TFCLIPVisionModelTester(self)
        a__ : Dict = TFBertModelTester(self)
        a__ : Optional[Any] = clip_model_tester.prepare_config_and_inputs()
        a__ : List[Any] = bert_model_tester.prepare_config_and_inputs()
        a__ , a__ : Union[str, Any] = vision_config_and_inputs
        # Unpack the 7-tuple returned by the BERT tester.
        (
            (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) ,
        ) : Optional[int] = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class A__( unittest.TestCase ):
    """Slow integration test against the public clip-italian checkpoint (TF)."""

    @slow
    def __lowercase(self) -> Any:
        """Check logits_per_image/logits_per_text shapes and values on a COCO image.

        Fixes the original, which used the undefined name `lowercase` both as
        a keyword value (for `from_pt`/`padding`) and for every local binding.
        """
        model = TFVisionTextDualEncoderModel.from_pretrained(
            'clip-italian/clip-italian', logit_scale_init_value=1.0, from_pt=True)
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'], images=image, padding=True, return_tensors='np')
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
        expected_logits = np.array([[1.2_28_47_27, 0.3_10_41_22]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 225 | 0 |
def SCREAMING_SNAKE_CASE__(_UpperCAmelCase=1_000_000) -> int:
    """Sum Euler's totient phi(k) for 2 <= k <= limit via a sieve.

    phi[i] starts at i - 1; whenever phi[p] is still p - 1, p is prime and
    every multiple of p gets the standard (1 - 1/p) correction applied.
    """
    limit = _UpperCAmelCase
    phi = list(range(-1, limit))  # phi[i] == i - 1 for every index i
    for p in range(2, limit + 1):
        if phi[p] == p - 1:  # untouched entry => p is prime
            for multiple in range(2 * p, limit + 1, p):
                phi[multiple] -= phi[multiple] // p
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
    # Print the result for the default limit when run as a script.
    # Fix: the solver in this module is named `SCREAMING_SNAKE_CASE__`, not
    # `solution`, so the original call raised NameError.
    print(SCREAMING_SNAKE_CASE__())
| 138 |
from torch import nn
def SCREAMING_SNAKE_CASE__(_UpperCAmelCase) -> "nn.Module":
    """Return the torch.nn activation module named by `_UpperCAmelCase`.

    Supported names: "swish"/"silu" -> nn.SiLU, "mish" -> nn.Mish,
    "gelu" -> nn.GELU. Raises ValueError for anything else.

    Fixes the original, whose error branch interpolated the undefined name
    `act_fn` and therefore raised NameError instead of the intended ValueError.
    """
    if _UpperCAmelCase in ["swish", "silu"]:
        return nn.SiLU()
    elif _UpperCAmelCase == "mish":
        return nn.Mish()
    elif _UpperCAmelCase == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {_UpperCAmelCase}")
| 138 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __A :
    """Builds a tiny DPR config plus dummy inputs for the TF DPR model tests.

    NOTE(review): upstream this helper is called ``TFDPRModelTester`` and the test
    class below still instantiates it under that name.  The original ``__init__``
    had every parameter mangled to the same identifier (a SyntaxError) and bound
    each value to one local instead of ``self.*``; both are repaired here.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        # Store every knob on the instance; the test methods read them back.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        """Create random ids (plus optional masks/labels) and a matching DPRConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        # DPR wraps a BERT config; is_decoder was mangled upstream — False is the
        # standard value for encoder testers (TODO confirm against upstream).
        base_config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **base_config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the context encoder with decreasing sets of optional inputs."""
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the question encoder with decreasing sets of optional inputs."""
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the reader and check span/relevance logit shapes."""
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common test mixin: only input_ids are fed."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class __A (TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the TF DPR encoders and reader.

    NOTE(review): the mixin bases, flag attribute names and the
    ``UpperCAmelCase_`` locals were mangled; restored from the imports visible
    at the top of this file.
    """

    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # NOTE(review): upstream names the helper class above ``TFDPRModelTester``;
        # the original call site already used that name, so it is kept.
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-load one checkpoint per archive list."""
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        # NOTE(review): the context-encoder loop appears twice in the original; kept as-is.
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class __A (unittest.TestCase):
    """Integration test: embed one question with the pretrained TF DPR question encoder
    and pin the first ten output values.

    NOTE(review): the original bound every value to ``snake_case_`` but then read
    ``output``/``expected_slice`` (NameError) — local names restored.
    """

    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
        input_ids = tf.constant(
            [[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03_236_253,
                    0.12_753_335,
                    0.16_818_509,
                    0.00_279_786,
                    0.3_896_933,
                    0.24_264_945,
                    0.2_178_971,
                    -0.02_335_227,
                    -0.08_481_959,
                    -0.14_324_117,
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 367 |
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
) -> dict:
    """Build the full kwargs dict a M2M100 forward pass expects.

    Missing attention masks are derived from the pad token; missing head masks
    default to all-ones of shape (num_layers, num_heads).

    NOTE(review): upstream this function carries this exact name (callers below
    use it), but it was defined as ``_a`` with duplicate parameter names — a
    SyntaxError — and mapped ``decoder_attention_mask`` to the *encoder* mask.
    Head masks are created on ``input_ids.device`` so the dict is self-consistent.
    """
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=input_ids.device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=input_ids.device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(
            config.decoder_layers, config.decoder_attention_heads, device=input_ids.device
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class __A :
    """Builds a tiny M2M100 config plus dummy encoder/decoder inputs for the tests.

    NOTE(review): upstream this helper is called ``MaMaaaModelTester`` (the test
    class below instantiates it under that name).  The original ``__init__`` had
    every parameter mangled to one identifier (a SyntaxError) and bound values to
    a single local instead of ``self.*``; both are repaired here.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Random encoder/decoder ids (pad-free, eos-terminated) plus a config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return MaMaaaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Decoder-only forward with cache must match the uncached forward on new tokens."""
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        """Saved encoder/decoder reloaded standalone must reproduce the joint model."""
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

            encoder_last_hidden_state_2 = encoder(
                inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"]
            )[0]

            self.parent.assertTrue(
                (encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1E-3
            )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

            last_hidden_state_2 = decoder(
                input_ids=inputs_dict["decoder_input_ids"],
                attention_mask=inputs_dict["decoder_attention_mask"],
                encoder_hidden_states=encoder_last_hidden_state,
                encoder_attention_mask=inputs_dict["attention_mask"],
            )[0]

            self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1E-3)
@require_torch
class __A (ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + generation tests for the M2M100 ("MaMaaa") model.

    NOTE(review): the mixin bases, flag attribute names, unittest method names and
    the ``UpperCAmelCase_`` locals were mangled (several methods even shared one
    name, silently shadowing each other); restored from the imports visible at
    the top of this file.
    """

    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """conversational""": MaMaaaForConditionalGeneration,
            """feature-extraction""": MaMaaaModel,
            """summarization""": MaMaaaForConditionalGeneration,
            """text2text-generation""": MaMaaaForConditionalGeneration,
            """translation""": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fp16 = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False

    def setUp(self):
        # NOTE(review): upstream names the helper class above ``MaMaaaModelTester``;
        # the original call site already used that name, so it is kept.
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        """Round-trip every model class through save/load and require no missing keys."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["""missing_keys"""], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        """Forward pass must also work from (decoder_)inputs_embeds instead of ids."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["""input_ids"""]
                del inputs["input_ids"]
            else:
                input_ids = inputs["""input_ids"""]
                decoder_input_ids = inputs.get("""decoder_input_ids""", input_ids)
                del inputs["input_ids"]
                inputs.pop("""decoder_input_ids""", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        """Generation must not crash (fp16 path on CUDA, fp32 elsewhere)."""
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    """Turn a nested list of token ids into a torch.long tensor on the test device.

    NOTE(review): this was mangled to ``_a`` (shadowing the inputs-dict helper of
    the same mangled name) while the integration tests below call ``_long_tensor``,
    and its ``device=`` pointed at its own argument — both repaired.
    """
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


# Absolute tolerance for the float comparisons in the integration tests below.
# NOTE(review): the dunder name cannot be referenced from inside a class body
# (Python name mangling), so the tests inline the 1e-4 literal.
__SCREAMING_SNAKE_CASE : float = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __A (unittest.TestCase):
    """Integration tests for the pretrained facebook/m2m100_418M checkpoint.

    NOTE(review): locals were mangled (``snake_case_`` assigned, other names read —
    NameError) and several boolean kwargs were lost; restored here.  The tolerance
    constant defined above is inlined as 1e-4 (module dunder is unreachable from a
    class body due to name mangling).
    """

    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""").to(torch_device)
        input_ids = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1_024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))

    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))

    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""", src_lang="""fr""", tgt_lang="""en""")

        src_fr = [
            """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
            """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
            """Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
            """ Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
            """ l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="""pt""")
        hypotheses_batch = model.generate(
            input_ids=dct["""input_ids"""].to(torch_device),
            attention_mask=dct["""attention_mask"""].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("""en"""),
        )

        expected_en = [
            """The NSA case highlights the total absence of intelligence debate""",
            """I think there are two levels of response from the French government.""",
            """When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
            """ Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
            """ communications in France.""",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 233 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class lowerCAmelCase__ ( PipelineTool ):
    """Agent tool that captions an image with BLIP (Salesforce/blip-image-captioning-base).

    NOTE(review): every class attribute was mangled to the same name
    ``lowerCamelCase__`` (each assignment overwrote the previous one) and the
    method bodies referenced ``_a`` while their parameters were named
    ``lowercase`` — attribute and parameter names restored to the ones the
    ``PipelineTool`` base class reads (``default_checkpoint``, ``description``,
    ``name``, ``model_class``, ``inputs``, ``outputs``, ``encode``/``forward``/
    ``decode``).
    """

    default_checkpoint = """Salesforce/blip-image-captioning-base"""
    description = (
        """This is a tool that generates a description of an image. It takes an input named `image` which should be the """
        """image to caption, and returns a text that contains the description in English."""
    )
    name = """image_captioner"""
    model_class = AutoModelForVisionaSeq

    inputs = ["""image"""]
    outputs = ["""text"""]

    def __init__(self, *args, **kwargs):
        # PIL is an optional dependency; fail early with a clear message.
        requires_backends(self, ['vision'] )
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Preprocess the PIL image into model-ready tensors."""
        return self.pre_processor(images=image, return_tensors='pt' )

    def forward(self, inputs):
        """Run generation on the preprocessed inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the generated ids into a single caption string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True )[0].strip()
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 281 | 0 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __lowerCAmelCase :
def __init__(self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=9_9 , lowerCAmelCase__=6_4 , lowerCAmelCase__=3_2 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ):
_UpperCAmelCase : str = parent
_UpperCAmelCase : Optional[int] = batch_size
_UpperCAmelCase : Optional[Any] = seq_length
_UpperCAmelCase : List[Any] = is_training
_UpperCAmelCase : str = use_input_mask
_UpperCAmelCase : Any = use_token_type_ids
_UpperCAmelCase : Union[str, Any] = use_labels
_UpperCAmelCase : Any = vocab_size
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Tuple = embedding_size
_UpperCAmelCase : List[str] = num_hidden_layers
_UpperCAmelCase : Optional[int] = num_attention_heads
_UpperCAmelCase : Any = intermediate_size
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : Tuple = hidden_dropout_prob
_UpperCAmelCase : int = attention_probs_dropout_prob
_UpperCAmelCase : Any = max_position_embeddings
_UpperCAmelCase : Any = type_vocab_size
_UpperCAmelCase : str = type_sequence_label_size
_UpperCAmelCase : Optional[int] = initializer_range
_UpperCAmelCase : str = num_labels
_UpperCAmelCase : Union[str, Any] = num_choices
_UpperCAmelCase : Any = scope
def snake_case_ (self ):
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : str = None
if self.use_input_mask:
_UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : Tuple = None
if self.use_token_type_ids:
_UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Optional[int] = None
if self.use_labels:
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
    """Return a MegatronBertConfig mirroring the tester's hyper-parameters.

    Fix: `is_decoder` was set to an undefined placeholder name; an encoder-only
    configuration (`is_decoder=False`) is the conventional default for this
    tester — the decoder path is exercised by the causal-LM check separately.
    """
    return MegatronBertConfig(
        vocab_size=self.vocab_size,
        hidden_size=self.hidden_size,
        num_hidden_layers=self.num_hidden_layers,
        num_attention_heads=self.num_attention_heads,
        intermediate_size=self.intermediate_size,
        embedding_size=self.embedding_size,
        hidden_act=self.hidden_act,
        hidden_dropout_prob=self.hidden_dropout_prob,
        attention_probs_dropout_prob=self.attention_probs_dropout_prob,
        max_position_embeddings=self.max_position_embeddings,
        type_vocab_size=self.type_vocab_size,
        is_decoder=False,
        initializer_range=self.initializer_range,
    )
def create_and_check_megatron_bert_model(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Instantiate the base model and check output shapes for three call styles.

    Fix: duplicate placeholder parameter names (SyntaxError) and undefined
    `UpperCamelCase_` references replaced with the argument order produced by
    `prepare_config_and_inputs`; the method name matches its caller in the
    model test class.
    """
    model = MegatronBertModel(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
    result = model(input_ids, token_type_ids=token_type_ids)
    result = model(input_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_megatron_bert_for_masked_lm(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the masked-LM head: logits over the vocabulary per position."""
    model = MegatronBertForMaskedLM(config=config)
    model.to(torch_device)
    model.eval()
    # Token-level labels drive the MLM loss.
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_megatron_bert_for_causal_lm(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the causal-LM head: logits over the vocabulary per position."""
    model = MegatronBertForCausalLM(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_megatron_bert_for_next_sequence_prediction(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the next-sentence-prediction head: a binary logit pair per example."""
    model = MegatronBertForNextSentencePrediction(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        labels=sequence_labels,
    )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def create_and_check_megatron_bert_for_pretraining(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the joint pretraining heads (MLM + NSP) output shapes."""
    model = MegatronBertForPreTraining(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        labels=token_labels,
        next_sentence_label=sequence_labels,
    )
    self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def create_and_check_megatron_bert_for_question_answering(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the QA head: one start logit and one end logit per position."""
    model = MegatronBertForQuestionAnswering(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        start_positions=sequence_labels,
        end_positions=sequence_labels,
    )
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_megatron_bert_for_sequence_classification(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the sequence-classification head: one logit vector per example."""
    # The head size is configured on the config, not the model constructor.
    config.num_labels = self.num_labels
    model = MegatronBertForSequenceClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_megatron_bert_for_token_classification(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the token-classification head: one logit vector per token."""
    config.num_labels = self.num_labels
    model = MegatronBertForTokenClassification(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_megatron_bert_for_multiple_choice(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the multiple-choice head: inputs are tiled per choice, one logit per choice."""
    config.num_choices = self.num_choices
    model = MegatronBertForMultipleChoice(config=config)
    model.to(torch_device)
    model.eval()
    # Repeat each input along a new "choice" axis: (batch, num_choices, seq_len).
    multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    result = model(
        multiple_choice_inputs_ids,
        attention_mask=multiple_choice_input_mask,
        token_type_ids=multiple_choice_token_type_ids,
        labels=choice_labels,
    )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
    """Return `(config, inputs_dict)` for the shared ModelTesterMixin checks.

    Fix: the original unpacked the tuple into the same placeholder name seven
    times and then referenced undefined names when building the dict.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_torch
# Common test-suite class for MegatronBert: wires the tiny-model tester above into
# the shared ModelTesterMixin / PipelineTesterMixin machinery.
# NOTE(review): the base names `__lowerCamelCase` and most referenced symbols
# (MegatronBert*, get_values, ConfigTester, MegatronBertModelTester) are not
# defined in this file — presumably lost imports; confirm against the original module.
class __lowerCAmelCase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
# All model classes exercised by the common tests (empty when torch is absent).
snake_case : Tuple = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
# Mapping from pipeline task name to the model class that backs it.
snake_case : Optional[int] = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case : Optional[int] = True
# test_resize_embeddings = False
snake_case : List[str] = False
# NOTE(review): the three parameters below all share one placeholder name — a
# SyntaxError in the scrambled source; the intended signature is
# (self, inputs_dict, model_class, return_labels=False).
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
# Adds dummy label tensors for model classes that require them.
_UpperCAmelCase : List[str] = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
if return_labels:
if model_class in get_values(UpperCamelCase_ ):
_UpperCAmelCase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase_ )
_UpperCAmelCase : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
return inputs_dict
# setUp: build the tiny-model tester and the config tester.
def snake_case_ (self ):
_UpperCAmelCase : Optional[int] = MegatronBertModelTester(self )
_UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=3_7 )
def snake_case_ (self ):
self.config_tester.run_common_tests()
# One wrapper per head type: prepare inputs, then delegate to the tester's check.
def snake_case_ (self ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*UpperCamelCase_ )
def snake_case_ (self ):
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*UpperCamelCase_ )
def snake_case_ (self ):
_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*UpperCamelCase_ )
def snake_case_ (self ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*UpperCamelCase_ )
def snake_case_ (self ):
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*UpperCamelCase_ )
def snake_case_ (self ):
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*UpperCamelCase_ )
def snake_case_ (self ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*UpperCamelCase_ )
def snake_case_ (self ):
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*UpperCamelCase_ )
def __A(lowerCAmelCase_):
    """Wrap a (nested) list of token ids in a ``torch.long`` tensor on the test device.

    Fix: the original body referenced the undefined name ``A__`` — and passed it
    both as the tensor data and as ``device``; the device is restored to the
    ``torch_device`` constant imported at the top of the file.
    """
    return torch.tensor(lowerCAmelCase_, dtype=torch.long, device=torch_device)
# Absolute/relative tolerance used by the integration check below.
lowerCAmelCase_ : Union[str, Any] = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
# Slow integration test against the released 345M checkpoint (skipped by default).
class __lowerCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip("""Model is not available.""" )
def snake_case_ (self ):
# NOTE(review): `os` and `math` are used below but not imported in this file,
# and the `UpperCamelCase_` placeholders stand in for several distinct values
# (checkpoint path, device, tensors, tolerance) — confirm against the original test.
_UpperCAmelCase : int = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
_UpperCAmelCase : Any = os.path.join(os.environ["""MYDIR"""] , UpperCamelCase_ )
_UpperCAmelCase : Tuple = MegatronBertModel.from_pretrained(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.half()
_UpperCAmelCase : Union[str, Any] = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
_UpperCAmelCase : Tuple = model(UpperCamelCase_ )[0]
_UpperCAmelCase : str = torch.Size((1, 9, 1_0_2_4) )
self.assertEqual(output.shape , UpperCamelCase_ )
# Spot-check a 3x3 corner of the hidden states against reference values.
_UpperCAmelCase : Tuple = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3 ):
for jj in range(3 ):
_UpperCAmelCase : Union[str, Any] = output[0, ii, jj]
_UpperCAmelCase : Optional[Any] = expected[3 * ii + jj]
_UpperCAmelCase : Optional[Any] = """ii={} jj={} a={} b={}""".format(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.assertTrue(math.isclose(UpperCamelCase_ , UpperCamelCase_ , rel_tol=UpperCamelCase_ , abs_tol=UpperCamelCase_ ) , msg=UpperCamelCase_ )
| 357 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
lowerCAmelCase_ : Any = logging.getLogger(__name__)
class __lowerCAmelCase(__a):
    """BertEncoder variant that can run a single, externally chosen layer (PABEE).

    Fix: the method's four parameters all shared one placeholder name (a
    SyntaxError) and the body referenced `head_mask` which was not bound.
    The method is named `adaptive_forward` to match its caller
    (`self.encoder.adaptive_forward(...)`) in the model class below.
    """

    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        # Run exactly one encoder layer; head_mask is indexed per layer,
        # matching BertEncoder's convention.
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , __a , )
# PABEE (patience-based early exit) BERT: during inference, stops running encoder
# layers once `patience` consecutive per-layer classifiers agree.
# NOTE(review): several `def snake_case_` methods below share one name (later defs
# shadow earlier ones), and `forward`'s parameters all share one placeholder name
# (a SyntaxError); the bodies reference the original names (input_ids,
# attention_mask, ...), so the real signatures must be restored from upstream.
class __lowerCAmelCase ( __a ):
def __init__(self , lowerCAmelCase__ ):
super().__init__(lowerCAmelCase__ )
_UpperCAmelCase : Tuple = BertEncoderWithPabee(lowerCAmelCase__ )
self.init_weights()
# NOTE(review): these four counters were presumably self.patience,
# self.inference_instances_num, self.inference_layers_num and
# self.regression_threshold — here they only bind throwaway locals.
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : Tuple = 0
_UpperCAmelCase : int = 0
# Setter for the regression early-exit threshold.
def snake_case_ (self , lowerCAmelCase__ ):
_UpperCAmelCase : Union[str, Any] = threshold
# Setter for the patience (number of consecutive agreeing layers required).
def snake_case_ (self , lowerCAmelCase__ ):
_UpperCAmelCase : Union[str, Any] = patience
# Reset inference statistics.
def snake_case_ (self ):
_UpperCAmelCase : int = 0
_UpperCAmelCase : Optional[Any] = 0
# Log the average number of layers run per inference instance.
def snake_case_ (self ):
_UpperCAmelCase : Union[str, Any] = self.inference_layers_num / self.inference_instances_num
_UpperCAmelCase : Optional[int] = (
F"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
F" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(lowerCAmelCase__ )
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
def snake_case_ (self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False , ):
# Standard BERT input validation: exactly one of input_ids / inputs_embeds.
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
_UpperCAmelCase : Optional[Any] = input_ids.size()
elif inputs_embeds is not None:
_UpperCAmelCase : str = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
_UpperCAmelCase : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_UpperCAmelCase : Optional[Any] = torch.ones(lowerCAmelCase__ , device=lowerCAmelCase__ )
if token_type_ids is None:
_UpperCAmelCase : Optional[int] = torch.zeros(lowerCAmelCase__ , dtype=torch.long , device=lowerCAmelCase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_UpperCAmelCase : torch.Tensor = self.get_extended_attention_mask(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = encoder_hidden_states.size()
_UpperCAmelCase : List[str] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_UpperCAmelCase : Tuple = torch.ones(lowerCAmelCase__ , device=lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = self.invert_attention_mask(lowerCAmelCase__ )
else:
_UpperCAmelCase : List[str] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_UpperCAmelCase : Any = self.get_head_mask(lowerCAmelCase__ , self.config.num_hidden_layers )
_UpperCAmelCase : int = self.embeddings(
input_ids=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = embedding_output
# Training: run every layer and collect the per-layer classifier outputs.
if self.training:
_UpperCAmelCase : Union[str, Any] = []
for i in range(self.config.num_hidden_layers ):
_UpperCAmelCase : Tuple = self.encoder.adaptive_forward(
lowerCAmelCase__ , current_layer=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , head_mask=lowerCAmelCase__ )
_UpperCAmelCase : Any = self.pooler(lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = output_layers[i](output_dropout(lowerCAmelCase__ ) )
res.append(lowerCAmelCase__ )
elif self.patience == 0: # Use all layers for inference
_UpperCAmelCase : int = self.encoder(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , )
_UpperCAmelCase : List[str] = self.pooler(encoder_outputs[0] )
_UpperCAmelCase : List[Any] = [output_layers[self.config.num_hidden_layers - 1](lowerCAmelCase__ )]
else:
# PABEE inference: run layer by layer, stopping once `patience`
# consecutive per-layer predictions agree.
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : int = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_UpperCAmelCase : int = self.encoder.adaptive_forward(
lowerCAmelCase__ , current_layer=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , head_mask=lowerCAmelCase__ )
_UpperCAmelCase : Tuple = self.pooler(lowerCAmelCase__ )
_UpperCAmelCase : int = output_layers[i](lowerCAmelCase__ )
if regression:
# Regression: agreement means the prediction moved less than the threshold.
_UpperCAmelCase : List[Any] = logits.detach()
if patient_result is not None:
_UpperCAmelCase : Union[str, Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_UpperCAmelCase : List[str] = 0
else:
# Classification: agreement means the argmax label is unchanged.
_UpperCAmelCase : Optional[int] = logits.detach().argmax(dim=1 )
if patient_result is not None:
_UpperCAmelCase : str = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(lowerCAmelCase__ ) ):
patient_counter += 1
else:
_UpperCAmelCase : Tuple = 0
_UpperCAmelCase : List[str] = logits
if patient_counter == self.patience:
break
_UpperCAmelCase : List[str] = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    __a,
)
class __lowerCAmelCase(__a):
    """PABEE sequence classifier: one linear head per encoder layer.

    The loss averages the per-layer losses, weighting layer ``i`` by ``i + 1``
    so that deeper layers contribute more.

    Fixes: the forward parameters all shared one placeholder name (a
    SyntaxError), locals were bound to throwaway placeholders, and the
    forward decorator referenced an undefined name instead of the imported
    BERT_INPUTS_DOCSTRING.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        # NOTE(review): BertModelWithPabee is the intended backbone class
        # (defined in the original module); confirm the name resolves here.
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One classification head per encoder layer, for early exit.
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        # The backbone returns one logits tensor per executed layer.
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    # Deeper layers get a larger weight in the averaged loss.
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
| 170 | 0 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
_A : Tuple =''''''
_A : str =''''''
_A : Dict =''''''
_A : List[Any] =''''''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> None:
# Download up to ~3200 most recent tweets for one account and dump them to CSV.
# NOTE(review): the four credential constants above are all bound to the same
# name `_A`, and this function passes its single `screen_name` argument where
# consumer key/secret and access tokens belong — the auth wiring needs to be
# restored from the original script before this can run.
# authorize twitter, initialize tweepy
lowerCamelCase__ : List[Any] = tweepy.OAuthHandler(UpperCamelCase , UpperCamelCase )
auth.set_access_token(UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Tuple = tweepy.API(UpperCamelCase )
# initialize a list to hold all the tweepy Tweets
lowerCamelCase__ : List[str] = []
# make initial request for most recent tweets (200 is the maximum allowed count)
lowerCamelCase__ : Optional[int] = api.user_timeline(screen_name=UpperCamelCase , count=200 )
# save most recent tweets
alltweets.extend(UpperCamelCase )
# save the id of the oldest tweet less one
lowerCamelCase__ : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(UpperCamelCase ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
lowerCamelCase__ : Any = api.user_timeline(
screen_name=UpperCamelCase , count=200 , max_id=UpperCamelCase )
# save most recent tweets
alltweets.extend(UpperCamelCase )
# update the id of the oldest tweet less one
lowerCamelCase__ : Optional[Any] = alltweets[-1].id - 1
print(f'''...{len(UpperCamelCase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
lowerCamelCase__ : Any = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' , """w""" ) as f:
lowerCamelCase__ : str = csv.writer(UpperCamelCase )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(UpperCamelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
| 41 |
from __future__ import annotations
UpperCAmelCase__ = "Muhammad Umer Farooq"
UpperCAmelCase__ = "MIT"
UpperCAmelCase__ = "1.0.0"
UpperCAmelCase__ = "Muhammad Umer Farooq"
UpperCAmelCase__ = "contact@muhammadumerfarooq.me"
UpperCAmelCase__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class lowercase_(HTMLParser):
    """HTML parser that collects absolute links from ``<a href="...">`` tags.

    Fixes: the base class ``lowercase`` was undefined — ``HTMLParser`` (imported
    above, and required for ``super().__init__()`` / the start-tag callback) is
    the intended base; the callback must be named ``handle_starttag`` for
    HTMLParser to invoke it; and its parameters shared one placeholder name.
    """

    def __init__(self, domain: str) -> None:
        super().__init__()
        # Absolute URLs collected while feeding HTML.
        self.urls: list = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list) -> None:
        """Record the absolute form of every non-empty, non-fragment href."""
        # Only anchor tags carry hyperlinks.
        if tag == "a":
            for name, value in attrs:
                # Skip empty hrefs and pure fragments.
                if name == "href" and value != "#" and value != "":
                    # NOTE(review): this dedup check compares the raw href
                    # against already-joined URLs (kept from the original).
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    """Return the registrable domain (last two labels) of *url*'s host.

    Fix: three functions in this module were all named ``_a`` (later defs
    shadowed earlier ones) while their bodies called the intended names; the
    names are restored so the calls resolve.
    """
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location (host, possibly with port) of *url*."""
    return parse.urlparse(url).netloc
def emails_from_url(url: str = "https://github.com") -> list:
    """Crawl *url*, follow the links found on the page, and collect e-mail
    addresses belonging to the page's domain. Returns a sorted list without
    duplicates.

    Fixes: the original reused a single variable for the URL, the domain, the
    parser and the HTTP response (so it ended up calling ``requests.get`` on a
    parser object), and it referenced an undefined ``Parser`` class; it now
    uses the ``lowercase_`` parser and ``get_domain_name`` helper defined above.
    """
    domain = get_domain_name(url)

    # Initialize the parser with the domain used to absolutize relative links.
    parser = lowercase_(domain)

    try:
        # Fetch the start page and let the parser extract its links.
        r = requests.get(url)
        parser.feed(r.text)

        # Visit every discovered link and harvest addresses on this domain.
        valid_emails = set()
        for link in parser.urls:
            try:
                read = requests.get(link)
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
# Demo run: crawl github.com and print any addresses found.
UpperCAmelCase__ = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print("\n".join(sorted(emails)))
| 0 | 0 |
# Small undirected demo graph used by the __main__ example below.
# Fix: the constant was renamed to a placeholder while the __main__ block
# still referenced `demo_graph`; the intended name is restored.
demo_graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Return one shortest path from *start* to *goal* as a list of nodes,
    or ``[]`` when no path exists.

    Fix: the function and all three parameters were collapsed to placeholder
    names (duplicate parameters are a SyntaxError); names are restored from
    the body's own references and the ``__main__`` caller.
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest path from *start* to *target*,
    ``0`` when they are equal, or ``-1`` when no path exists or either node is
    missing from *graph*.

    Fix: restored the function and parameter names (the original collapsed them
    all to placeholders, a SyntaxError) from the body and the ``__main__`` caller.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
# Demo of both BFS helpers on the module-level example graph.
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4 | 362 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowerCAmelCase__( __lowercase ):
'''Audio diffusion pipeline: denoises mel-spectrogram images (optionally through
a VQ-VAE latent space) and converts them back to audio via the Mel helper.

NOTE(review): the parameters of __init__ and __call__ below all share one
placeholder name each (duplicate parameters are a SyntaxError in Python);
the bodies reference the original names (steps, generator, noise, ...), so
the real signatures must be restored from the upstream pipeline.
'''
__snake_case = ['vqvae']
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[Any]:
super().__init__()
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase , mel=__lowerCamelCase , vqvae=__lowerCamelCase )
# Default number of inference steps: 50 for DDIM-style schedulers, 1000 otherwise.
def UpperCamelCase_ ( self ) -> int:
return 5_0 if isinstance(self.scheduler , __lowerCamelCase ) else 1_0_0_0
@torch.no_grad()
def __call__( self , __lowerCamelCase = 1 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
_SCREAMING_SNAKE_CASE : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_SCREAMING_SNAKE_CASE : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size)
# Start from pure Gaussian noise unless an explicit noise tensor was given.
if noise is None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowerCamelCase , device=self.device , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = noise
_SCREAMING_SNAKE_CASE : Optional[int] = None
# Optional audio conditioning: convert input audio to a spectrogram image.
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.mel.audio_slice_to_image(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
# Map uint8 pixels to [-1, 1] as expected by the diffusion model.
_SCREAMING_SNAKE_CASE : Optional[int] = (input_image / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(__lowerCamelCase , 0 ) ).latent_dist.sample(
generator=__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , self.scheduler.timesteps[start_step - 1] )
# Convert the mask window from seconds to spectrogram pixels.
_SCREAMING_SNAKE_CASE : int = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_SCREAMING_SNAKE_CASE : Optional[Any] = int(mask_start_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[int] = int(mask_end_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
# Main denoising loop.
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[str] = self.unet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["sample"]
else:
_SCREAMING_SNAKE_CASE : str = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
if isinstance(self.scheduler , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
else:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
# Re-impose the masked (preserved) regions at each step.
if mask is not None:
if mask_start > 0:
_SCREAMING_SNAKE_CASE : str = mask[:, step, :, :mask_start]
if mask_end > 0:
_SCREAMING_SNAKE_CASE : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_SCREAMING_SNAKE_CASE : Optional[Any] = 1 / self.vqvae.config.scaling_factor * images
_SCREAMING_SNAKE_CASE : Dict = self.vqvae.decode(__lowerCamelCase )["sample"]
# Map model output back to uint8 grayscale images, then to audio.
_SCREAMING_SNAKE_CASE : Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_SCREAMING_SNAKE_CASE : List[str] = (images * 2_5_5).round().astype("uint8" )
_SCREAMING_SNAKE_CASE : Tuple = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__lowerCamelCase , mode="RGB" ).convert("L" ) for _ in images) )
_SCREAMING_SNAKE_CASE : Tuple = [self.mel.image_to_audio(__lowerCamelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowerCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowerCamelCase ) )
@torch.no_grad()
# Reverse DDIM: encode images back into the noise that would generate them.
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = 5_0 ) -> np.ndarray:
assert isinstance(self.scheduler , __lowerCamelCase )
self.scheduler.set_timesteps(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (sample / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : Any = torch.Tensor(__lowerCamelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_SCREAMING_SNAKE_CASE : Optional[int] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.alphas_cumprod[t]
_SCREAMING_SNAKE_CASE : List[str] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_SCREAMING_SNAKE_CASE : Optional[int] = 1 - alpha_prod_t
_SCREAMING_SNAKE_CASE : Optional[int] = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : List[str] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_SCREAMING_SNAKE_CASE : str = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_SCREAMING_SNAKE_CASE : List[str] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
# Spherical linear interpolation between two tensors by fraction `alpha`.
def UpperCamelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> torch.Tensor:
_SCREAMING_SNAKE_CASE : Any = acos(torch.dot(torch.flatten(__lowerCamelCase ) , torch.flatten(__lowerCamelCase ) ) / torch.norm(__lowerCamelCase ) / torch.norm(__lowerCamelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(__lowerCamelCase ) + sin(alpha * theta ) * xa / sin(__lowerCamelCase )
from jiwer import compute_measures
import datasets
UpperCAmelCase : str = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
UpperCAmelCase : List[str] = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
UpperCAmelCase : str = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __lowercase(datasets.Metric):
    """Word error rate (WER) metric backed by ``jiwer.compute_measures``."""

    def _info(self):
        """Declare metric metadata and the (string, string) input features.

        ``datasets.Metric`` calls ``_info``/``_compute`` by name; the mangled
        dump had both methods named ``__A`` so the second shadowed the first.
        """
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""", id="""sequence"""),
                    """references""": datasets.Value("""string""", id="""sequence"""),
                }
            ),
            codebase_urls=["""https://github.com/jitsi/jiwer/"""],
            reference_urls=[
                """https://en.wikipedia.org/wiki/Word_error_rate""",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        """Return the corpus-level WER of *predictions* against *references*.

        When ``concatenate_texts`` is True, jiwer scores everything in a single
        call; otherwise raw error counts are accumulated pair by pair so the
        final ratio is corpus-weighted (not an average of per-sentence rates).
        """
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = compute_measures(reference, prediction)
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure: submodule name -> list of public symbols. The
# torch-only modeling entry is registered only when torch is importable.
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}
# NOTE(review): the mangled dump bound this dict to `UpperCAmelCase`, leaving
# `_import_structure` (consumed by _LazyModule below) undefined; keep the old
# name bound as well for backward compatibility.
UpperCAmelCase = _import_structure

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import math
import sys
def _lowerCAmelCase ( A__: Dict ):
'''simple docstring'''
if number != int(lowerCAmelCase__ ):
raise ValueError('''the value of input must be a natural number''' )
if number < 0:
raise ValueError('''the value of input must not be a negative number''' )
if number == 0:
return 1
UpperCAmelCase = [-1] * (number + 1)
UpperCAmelCase = 0
for i in range(1 , number + 1 ):
UpperCAmelCase = sys.maxsize
UpperCAmelCase = int(math.sqrt(lowerCAmelCase__ ) )
for j in range(1 , root + 1 ):
UpperCAmelCase = 1 + answers[i - (j**2)]
UpperCAmelCase = min(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: submodule name -> public symbols; tokenizer entries
# are registered only when their optional backend is installed. The mangled
# dump rebound everything to `__magic_name__`, leaving `_import_structure`
# (consumed by _LazyModule below) undefined.
_import_structure = {}
__magic_name__ = _import_structure

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    """Shared assertions for a Dataset read from the fixture `dataset` SQL table.

    Named `_check_sql_dataset` because the tests below call it under that name;
    the mangled dump had also collapsed both parameters into one (SyntaxError).
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Reading from SQL honours `keep_in_memory` (verified via Arrow memory tracking).

    NOTE(review): fixture parameter names reconstructed from the body (the dump
    mangled all four to the same name) — confirm against the conftest fixtures.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Explicit `features` override the dtypes inferred from the SQL table.

    NOTE(review): fixture parameter names reconstructed from the body — confirm
    against the conftest fixtures.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    """Yield every row of the `dataset` table in the SQLite database at *sqlite_path*.

    Named `iter_sql_file` because the round-trip tests below call it under that
    name.
    """
    # NOTE(review): the module-level `import sqlitea` is a mangled `import
    # sqlite3`; import the real module locally so this helper works either way.
    import sqlite3

    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Round-trip with num_proc=1: rows written back to SQL equal the source rows.

    NOTE(review): fixture parameter names reconstructed from the body — confirm
    against the conftest fixtures.
    """
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Round-trip with num_proc=2: multiprocess write produces the same rows.

    NOTE(review): fixture parameter names reconstructed from the body — confirm
    against the conftest fixtures.
    """
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """num_proc=0 must be rejected before any write happens.

    NOTE(review): the dump mangled the expected exception class; ValueError is
    what SqlDatasetWriter raises upstream — confirm.
    """
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
a : Optional[int] = _symbol_database.Default()
a : Any = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ 
\x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 
\x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
# Generated protobuf module-initialisation boilerplate (protoc output). `a`
# still holds the AddSerializedFile() result from the assignment above; the
# builder API below requires it under the name DESCRIPTOR, together with this
# module's namespace as _globals — both names were lost to the mangling.
DESCRIPTOR = a
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = B"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    # NOTE(review): the offsets below lost their attribute targets in the dump;
    # they are restored here following the layout protoc emits for this proto
    # (message start/end byte offsets into the serialized file) — TODO verify
    # against a regenerated sentencepiece_model_pb2.py.
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
def UpperCamelCase(__lowerCamelCase: int = 50):
    """Project Euler 114: count ways to fill a row of length ``__lowerCamelCase``
    with red blocks of length >= 3, each pair separated by at least one black
    square (the all-black row counts as one way).

    The mangled dump bound the DP table to a throwaway name, leaving
    ``ways_number`` undefined; restored here.
    """
    length = __lowerCamelCase
    # ways_number[k] = number of arrangements for a row of length k.
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                # Place the block, skip one separating square, and recurse on the rest.
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # The block flush against the right edge (no remainder).
            ways_number[row_length] += 1
    return ways_number[length]


# The __main__ guard prints `solution()`; bind that name to the entry point.
solution = UpperCamelCase

if __name__ == "__main__":
    print(F'{solution() = }')
def UpperCamelCase(__lowerCamelCase: int):
    """Return the largest number obtainable by deleting exactly one digit from
    the absolute value of the integer ``__lowerCamelCase``.

    Raises:
        TypeError: if the input is not an integer.
    """
    if not isinstance(__lowerCamelCase, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(__lowerCamelCase))
    # One candidate per digit position, each with that digit removed.
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(list(transposition))) for transposition in num_transpositions
    )


if __name__ == "__main__":
    __import__("doctest").testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: submodule name -> public symbols; torch-only modeling
# entries are registered only when torch is importable. The mangled dump bound
# the dict to `SCREAMING_SNAKE_CASE`, leaving `_import_structure` (consumed by
# _LazyModule below) undefined.
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}
SCREAMING_SNAKE_CASE = _import_structure

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    # NOTE(review): the dump garbled these to `.configuration_blip_a` /
    # `BlipaConfig`; restored to match the symbol strings registered above.
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 247 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: submodule name -> public symbols; backend-specific
# entries (tokenizers / torch / tf) are registered only when the backend is
# importable. The mangled dump bound every list to `__UpperCAmelCase`, leaving
# `_import_structure` (consumed by _LazyModule below) undefined and the
# backend symbol lists unregistered.
_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}
__UpperCAmelCase = _import_structure

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    # Without sentencepiece the slow tokenizer class is unavailable; the class
    # below references the name `TaTokenizer`, so bind it to None here (the
    # mangled dump bound None to an unrelated placeholder instead).
    TaTokenizer = None

logger = logging.get_logger(__name__)

# File names expected inside a pretrained tokenizer directory; referenced by
# the class attributes and `save_vocabulary` below.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
# Preserve the module-level name the mangled assignments last bound.
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class a__(PreTrainedTokenizerFast):
    """Fast T5 tokenizer backed by HuggingFace *tokenizers*.

    Reconstructed from the mangled dump: the base class, class-attribute names
    and method names follow the standard `PreTrainedTokenizerFast` contract
    (the dump had collapsed all method names to one identifier, so only the
    last method survived).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    # Tokens prepended to every encoded sequence (none for T5).
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        """Create the tokenizer; `extra_ids` sentinel tokens (<extra_id_N>) are
        synthesised unless explicitly supplied via `additional_special_tokens`.
        """
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"""<extra_id_{i}>""" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool('''extra_id_''' in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
                    ''' tokens''' )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        # NOTE(review): upstream stores this as `self._extra_ids`; the dump lost
        # the attribute name — confirm which callers expect.
        self.extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        """Work around legacy 512-token max lengths baked into old T5 checkpoints."""
        if pretrained_model_name_or_path in a__.max_model_input_sizes:
            deprecated_max_model_length = a__.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    '''This tokenizer was incorrectly instantiated with a model max length of'''
                    f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
                    ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
                    ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
                    f""" {pretrained_model_name_or_path} automatically truncating your input to"""
                    f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
                    f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
                    ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
                    ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''',
                    FutureWarning,  # NOTE(review): warning category reconstructed — confirm
                )
        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy the sentencepiece model file into *save_directory* and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"""Copy vocab file to {out_vocab_file}""" )
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None):
        """Append EOS to each segment and concatenate with the prefix tokens."""
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None):
        """T5 does not use token types: return a zero mask over the full length."""
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_b + eos) * [0]

    def get_sentinel_tokens(self):
        """Return the <extra_id_N> sentinel tokens present among the special tokens."""
        return list(
            set(filter(lambda x: bool(re.search(R'''<extra_id_\d+>''', x)) is not None, self.additional_special_tokens)) )

    def get_sentinel_token_ids(self):
        """Return the vocabulary ids of the sentinel tokens."""
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class UpperCamelCase__(BertTokenizerFast):
    """Fast counterpart of CustomTokenizer.

    The mangled dump had an undefined base class; `BertTokenizerFast` is the
    only imported candidate and mirrors the slow tokenizer's base.
    """

    # Map back to the matching slow tokenizer class.
    # NOTE(review): attribute name reconstructed (`slow_tokenizer_class` is the
    # PreTrainedTokenizerFast hook) — confirm.
    slow_tokenizer_class = CustomTokenizer
    pass
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    """Return the simple interest accrued over *days_between_payments* days.

    Raises:
        ValueError: on non-positive principal/days or a negative rate.
    """
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    """Return the compound interest earned (growth minus the principal).

    Raises:
        ValueError: on non-positive principal/periods or a negative rate.
    """
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    """Return interest for daily compounding at an annual percentage rate.

    Delegates to `compound_interest` with a daily rate over 365 periods/year
    (the mangled dump called `compound_interest` while leaving it undefined).

    Raises:
        ValueError: on non-positive principal/years or a negative rate.
    """
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )


# Preserve the module-level name the mangled dump last bound.
_lowerCAmelCase = apr_interest

if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
from typing import Any
import numpy as np
def __snake_case( _lowerCAmelCase ) -> bool:
return np.array_equal(snake_case__ , matrix.conjugate().T )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
snake_case__ : str = v.conjugate().T
snake_case__ : int = v_star.dot(snake_case__ )
assert isinstance(snake_case__ , np.ndarray )
return (v_star_dot.dot(snake_case__ )) / (v_star.dot(snake_case__ ))
def __snake_case( ) -> None:
    """Self-test: check Hermitian-ness and Rayleigh quotients of two sample matrices."""
    # NOTE(review): `is_hermitian` and `rayleigh_quotient` are unresolved in
    # this module — every function here is named `__snake_case` — so these
    # calls raise NameError at runtime; confirm the intended public names.
    snake_case__ : List[str] = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
    snake_case__ : Any = np.array([[1], [2], [3]] )
    assert is_hermitian(snake_case__ ), f"{a} is not hermitian."
    print(rayleigh_quotient(snake_case__ , snake_case__ ) )
    snake_case__ : List[Any] = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(snake_case__ ), f"{a} is not hermitian."
    # For this real symmetric matrix the quotient with the chosen vector is exactly 3.
    assert rayleigh_quotient(snake_case__ , snake_case__ ) == float(3 )
# Run doctests, then the module self-test, when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `tests` is unresolved — the self-test above is also named
    # `__snake_case`; confirm the intended name.
    tests()
| 364 |
'''simple docstring'''
def __snake_case( ) -> list[list[int]]:
return [list(range(1_000 - i , -1_000 - i , -1 ) ) for i in range(1_000 )]
# Module-level fixtures used by the counting functions and the benchmark.
# NOTE(review): obfuscation damage — both assignments bind the same name
# `__a`, so the large matrix is immediately shadowed by the tuple of test
# grids; `generate_large_matrix` and the bare `grid` inside the tuple are
# also unresolved here. The originals were almost certainly named `grid`
# and `test_grids` — confirm before use.
__a = generate_large_matrix()
__a = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def __snake_case( _lowerCAmelCase ) -> None:
assert all(row == sorted(_lowerCAmelCase , reverse=_lowerCAmelCase ) for row in grid )
assert all(list(_lowerCAmelCase ) == sorted(_lowerCAmelCase , reverse=_lowerCAmelCase ) for col in zip(*_lowerCAmelCase ) )
def __snake_case( _lowerCAmelCase ) -> int:
snake_case__ : List[str] = 0
snake_case__ : str = len(_lowerCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
snake_case__ : List[Any] = (left + right) // 2
snake_case__ : Tuple = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
snake_case__ : Tuple = mid + 1
else:
snake_case__ : Tuple = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> int:
snake_case__ : Optional[int] = 0
snake_case__ : Optional[int] = len(grid[0] )
for i in range(len(_lowerCAmelCase ) ):
snake_case__ : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(_lowerCAmelCase ) * len(grid[0] )) - total
def __snake_case( _lowerCAmelCase ) -> int:
return len([number for row in grid for number in row if number < 0] )
def __snake_case( _lowerCAmelCase ) -> int:
snake_case__ : List[Any] = 0
for row in grid:
for i, number in enumerate(_lowerCAmelCase ):
if number < 0:
total += len(_lowerCAmelCase ) - i
break
return total
def __snake_case( ) -> None:
    """Benchmark the three counting strategies against the module-level grid.

    Bug fix: the setup string built below was never used — ``timeit`` was
    called with the undefined name ``_lowerCAmelCase`` as its setup; the
    local variable is now passed through.
    """
    from timeit import timeit

    print("""Running benchmarks""" )
    setup = (
        """from __main__ import count_negatives_binary_search, """
        """count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)" , setup=setup , number=500 )
        print(f"{func}() took {time:0.4f} seconds" )
# Run doctests, then the timing comparison, when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `benchmark` is unresolved — every function in this module
    # is named `__snake_case`; confirm the intended name.
    benchmark()
| 43 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCAmelCase = TypeVar('T')
lowerCAmelCase = TypeVar('U')
class _a ( Generic[T, U] ):
    """Doubly-linked-list node holding one cached key/value pair.

    Bug fixes: the obfuscated ``__init__`` declared two parameters with the
    same name (a SyntaxError) and bound throwaway locals instead of instance
    attributes; ``key``/``val``/``next``/``prev`` are now set on the instance,
    which also makes ``__repr__`` work.
    """

    def __init__( self , key , val ) -> None:
        self.key = key
        self.val = val
        # Neighbour links; populated when the node is spliced into a list.
        self.next = None
        self.prev = None

    def __repr__( self ) -> str:
        return (
            f'Node: key: {self.key}, val: {self.val}, '
            f'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
        )
class _a ( Generic[T, U] ):
    """Sentinel-guarded doubly linked list used by the LRU cache below.

    NOTE(review): obfuscation damage preserved byte-for-byte — the name
    ``DoubleLinkedListNode`` does not exist in this module (the node class is
    also named ``_a``), ``_lowerCAmelCase`` is undefined, and the repeated
    ``lowercase__ = ...`` assignments bind throwaway locals where instance
    attributes (``self.head``/``self.rear`` and the sentinel links) were
    clearly intended. Restore those before relying on this class.
    """

    def __init__( self: List[Any] ) -> str:
        # Intended: create head/rear sentinel nodes and link them to each other.
        lowercase__ = DoubleLinkedListNode(_lowerCAmelCase , _lowerCAmelCase )
        lowercase__ = DoubleLinkedListNode(_lowerCAmelCase , _lowerCAmelCase )
        lowercase__ , lowercase__ = self.rear, self.head

    def __repr__( self: Optional[int] ) -> List[Any]:
        # Walk from head to rear, rendering each node on its own line.
        lowercase__ = ['''DoubleLinkedList''']
        lowercase__ = self.head
        while node.next is not None:
            rep.append(str(_lowerCAmelCase ) )
            lowercase__ = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(_lowerCAmelCase )

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: DoubleLinkedListNode[T, U] ) -> Tuple:
        """Append *node* immediately before the rear sentinel (most-recently-used slot)."""
        lowercase__ = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        lowercase__ = node
        lowercase__ = previous
        lowercase__ = node
        lowercase__ = self.rear

    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: DoubleLinkedListNode[T, U] ) -> Union[str, Any]:
        """Unlink *node* from the list and return it; returns None for detached nodes."""
        if node.prev is None or node.next is None:
            return None
        lowercase__ = node.next
        lowercase__ = node.prev
        lowercase__ = None
        lowercase__ = None
        return node
class _a ( Generic[T, U] ):
    """LRU cache backed by a dict plus a doubly linked list, with a class-level
    decorator interface.

    NOTE(review): obfuscation damage preserved byte-for-byte — references to
    ``DoubleLinkedList``/``DoubleLinkedListNode``/``LRUCache`` and the
    placeholder ``_lowerCAmelCase`` are unresolved (all three classes in this
    module are named ``_a``), the ``lowercase__`` assignments bind locals
    instead of the intended attributes, and ``cls.decorator_function_to_instance_map``
    does not exist (the attribute was renamed ``_lowercase``). Restore the
    original identifiers before relying on this class.
    """

    # Maps decorated functions to their LRUCache instance (shared per class).
    _lowercase : Optional[Any] = {}

    def __init__( self: Any , UpperCamelCase_: int ) -> List[Any]:
        # Intended attributes: list, capacity, num_keys, hits, miss, cache.
        lowercase__ = DoubleLinkedList()
        lowercase__ = capacity
        lowercase__ = 0
        lowercase__ = 0
        lowercase__ = 0
        lowercase__ = {}

    def __repr__( self: Optional[int] ) -> Dict:
        return (
            f'CacheInfo(hits={self.hits}, misses={self.miss}, '
            f'capacity={self.capacity}, current size={self.num_keys})'
        )

    def __contains__( self: List[str] , UpperCamelCase_: T ) -> Tuple:
        # Membership is delegated to the backing dict.
        return key in self.cache

    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: T ) -> Optional[Any]:
        """Return the cached value for *key* (bumping it to most-recent) or None on a miss."""
        if key in self.cache:
            self.hits += 1
            lowercase__ = self.cache[key]
            lowercase__ = self.list.remove(self.cache[key] )
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(_lowerCAmelCase )
            return node.val
        self.miss += 1
        return None

    def lowerCamelCase_ ( self: int , UpperCamelCase_: T , UpperCamelCase_: U ) -> Tuple:
        """Insert or update *key*, evicting the least-recently-used entry at capacity."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                lowercase__ = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(_lowerCAmelCase ) is not None
                )  # node guaranteed to be in list assert node.key is not None
                del self.cache[first_node.key]
                self.num_keys -= 1
            lowercase__ = DoubleLinkedListNode(_lowerCAmelCase , _lowerCAmelCase )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            lowercase__ = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            lowercase__ = value
            self.list.add(_lowerCAmelCase )

    @classmethod
    def lowerCamelCase_ ( cls: int , UpperCamelCase_: int = 128 ) -> List[str]:
        """Decorator factory: memoize a single-argument function with an LRU cache."""

        def cache_decorator_inner(UpperCamelCase_: Callable[[T], U] ) -> Callable[..., U]:
            def cache_decorator_wrapper(*UpperCamelCase_: T ) -> U:
                # Lazily create one cache per decorated function.
                if func not in cls.decorator_function_to_instance_map:
                    lowercase__ = LRUCache(_lowerCAmelCase )
                lowercase__ = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    lowercase__ = func(*_lowerCAmelCase )
                    cls.decorator_function_to_instance_map[func].put(args[0] , _lowerCAmelCase )
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(_lowerCAmelCase , '''cache_info''' , _lowerCAmelCase )  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 110 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCamelCase_ :
    """Test helper that builds miniature Llama configs/inputs and runs shape checks.

    NOTE(review): obfuscation damage preserved byte-for-byte — every method
    parameter is named ``_lowerCAmelCase`` (signatures with several such
    parameters are duplicate-argument SyntaxErrors), all helper methods share
    the name ``lowerCAmelCase_`` (later defs shadow earlier ones), the
    ``SCREAMING_SNAKE_CASE_`` assignments bind one throwaway name where
    distinct attributes/locals (``parent``, ``batch_size``, ...) were clearly
    intended, and those right-hand names are unresolved. Restore the original
    identifiers before relying on this tester.
    """

    def __init__( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple=13 , _lowerCAmelCase : List[str]=7 , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Any=True , _lowerCAmelCase : str=99 , _lowerCAmelCase : List[str]=32 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : List[str]=4 , _lowerCAmelCase : str=37 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Optional[int]=512 , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : Union[str, Any]=None , ):
        # Intended: store each constructor argument on the corresponding attribute.
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = batch_size
        SCREAMING_SNAKE_CASE_ = seq_length
        SCREAMING_SNAKE_CASE_ = is_training
        SCREAMING_SNAKE_CASE_ = use_input_mask
        SCREAMING_SNAKE_CASE_ = use_token_type_ids
        SCREAMING_SNAKE_CASE_ = use_labels
        SCREAMING_SNAKE_CASE_ = vocab_size
        SCREAMING_SNAKE_CASE_ = hidden_size
        SCREAMING_SNAKE_CASE_ = num_hidden_layers
        SCREAMING_SNAKE_CASE_ = num_attention_heads
        SCREAMING_SNAKE_CASE_ = intermediate_size
        SCREAMING_SNAKE_CASE_ = hidden_act
        SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_ = max_position_embeddings
        SCREAMING_SNAKE_CASE_ = type_vocab_size
        SCREAMING_SNAKE_CASE_ = type_sequence_label_size
        SCREAMING_SNAKE_CASE_ = initializer_range
        SCREAMING_SNAKE_CASE_ = num_labels
        SCREAMING_SNAKE_CASE_ = num_choices
        SCREAMING_SNAKE_CASE_ = scope

    def lowerCAmelCase_ ( self : List[str] ):
        # Intended: build random input ids, masks and labels for one forward pass.
        SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE_ = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE_ = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        SCREAMING_SNAKE_CASE_ = None
        SCREAMING_SNAKE_CASE_ = None
        SCREAMING_SNAKE_CASE_ = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )
        SCREAMING_SNAKE_CASE_ = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowerCAmelCase_ ( self : Optional[int] ):
        # Intended: a tiny LlamaConfig mirroring the tester's dimensions.
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )

    def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ):
        # Intended: forward a bare LlamaModel and check the hidden-state shape.
        SCREAMING_SNAKE_CASE_ = LlamaModel(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , ):
        # Intended: exercise the model as a decoder with cross-attention inputs.
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = LlamaModel(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE_ = model(
            _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , )
        SCREAMING_SNAKE_CASE_ = model(
            _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , )
        SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase_ ( self : int , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , ):
        # Intended: causal-LM forward with labels; logits shape check.
        SCREAMING_SNAKE_CASE_ = LlamaForCausalLM(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] , ):
        # Intended: verify past_key_values reuse yields the same hidden states
        # as recomputing from scratch on a random slice.
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = LlamaForCausalLM(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        # first forward pass
        SCREAMING_SNAKE_CASE_ = model(
            _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase , )
        SCREAMING_SNAKE_CASE_ = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1 )
        SCREAMING_SNAKE_CASE_ = torch.cat([input_mask, next_mask] , dim=-1 )
        SCREAMING_SNAKE_CASE_ = model(
            _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['hidden_states'][0]
        SCREAMING_SNAKE_CASE_ = model(
            _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['hidden_states'][0]
        # select random slice
        SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        SCREAMING_SNAKE_CASE_ = output_from_no_past[:, -3:, random_slice_idx].detach()
        SCREAMING_SNAKE_CASE_ = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )

    def lowerCAmelCase_ ( self : List[str] ):
        # Intended: adapt prepare_config_and_inputs() output to the common
        # (config, inputs_dict) contract used by the shared test mixins.
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) ,
        ) = config_and_inputs
        SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Model/pipeline test-suite for the Llama family (common mixin-driven checks).

    NOTE(review): obfuscation damage preserved byte-for-byte — the base-class
    placeholder ``_SCREAMING_SNAKE_CASE`` is unresolved (originally the
    ModelTester/Generation/Pipeline mixins), ``LlamaModelTester`` does not
    exist under that name in this module, most method signatures reuse the
    duplicate parameter name ``_lowerCAmelCase``, and the
    ``SCREAMING_SNAKE_CASE_`` assignments bind one throwaway local where
    distinct names were intended.
    """

    # Model classes exercised by the common tests.
    lowercase_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    lowercase_ = (LlamaForCausalLM,) if is_torch_available() else ()
    lowercase_ = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowercase_ = False
    lowercase_ = False

    def lowerCAmelCase_ ( self : Dict ):
        # Intended: setUp — build the model tester and a config tester.
        SCREAMING_SNAKE_CASE_ = LlamaModelTester(self )
        SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )

    def lowerCAmelCase_ ( self : Any ):
        self.config_tester.run_common_tests()

    def lowerCAmelCase_ ( self : List[Any] ):
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowerCAmelCase )

    def lowerCAmelCase_ ( self : Union[str, Any] ):
        # Intended: repeat the model check for each position-embedding type.
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            SCREAMING_SNAKE_CASE_ = type
            self.model_tester.create_and_check_model(*_lowerCAmelCase )

    def lowerCAmelCase_ ( self : str ):
        # Sequence classification head, regression-style labels.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = input_dict['input_ids']
        SCREAMING_SNAKE_CASE_ = input_ids.ne(1 ).to(_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        SCREAMING_SNAKE_CASE_ = LlamaForSequenceClassification(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def lowerCAmelCase_ ( self : Union[str, Any] ):
        # Sequence classification head, single-label classification.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = 'single_label_classification'
        SCREAMING_SNAKE_CASE_ = input_dict['input_ids']
        SCREAMING_SNAKE_CASE_ = input_ids.ne(1 ).to(_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        SCREAMING_SNAKE_CASE_ = LlamaForSequenceClassification(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def lowerCAmelCase_ ( self : List[Any] ):
        # Sequence classification head, multi-label classification.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = 'multi_label_classification'
        SCREAMING_SNAKE_CASE_ = input_dict['input_ids']
        SCREAMING_SNAKE_CASE_ = input_ids.ne(1 ).to(_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        SCREAMING_SNAKE_CASE_ = LlamaForSequenceClassification(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
    def lowerCAmelCase_ ( self : int ):
        pass

    @parameterized.expand([('linear',), ('dynamic',)] )
    def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Tuple ):
        # RoPE scaling: compare an unscaled model against a scaled one on
        # short and long inputs (dynamic scaling should match on short input).
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE_ = ids_tensor([1, 10] , config.vocab_size )
        SCREAMING_SNAKE_CASE_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        SCREAMING_SNAKE_CASE_ = LlamaModel(_lowerCAmelCase )
        original_model.to(_lowerCAmelCase )
        original_model.eval()
        SCREAMING_SNAKE_CASE_ = original_model(_lowerCAmelCase ).last_hidden_state
        SCREAMING_SNAKE_CASE_ = original_model(_lowerCAmelCase ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        SCREAMING_SNAKE_CASE_ = {'type': scaling_type, 'factor': 10.0}
        SCREAMING_SNAKE_CASE_ = LlamaModel(_lowerCAmelCase )
        scaled_model.to(_lowerCAmelCase )
        scaled_model.eval()
        SCREAMING_SNAKE_CASE_ = scaled_model(_lowerCAmelCase ).last_hidden_state
        SCREAMING_SNAKE_CASE_ = scaled_model(_lowerCAmelCase ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow integration tests: compare Llama-2 checkpoint logits against recorded values.

    NOTE(review): obfuscation damage preserved — ``_lowerCAmelCase`` is used
    both as an undefined value placeholder and the duplicated single local
    ``SCREAMING_SNAKE_CASE_`` hides the intended distinct names
    (``input_ids``, ``model``, ``out``, ``EXPECTED_MEAN``, ...). All tests are
    skipped and require gated checkpoints plus accelerators.
    """

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def lowerCAmelCase_ ( self : List[str] ):
        # Llama-2 7B logits regression check.
        SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
        SCREAMING_SNAKE_CASE_ = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        SCREAMING_SNAKE_CASE_ = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
        torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1E-5 , rtol=1E-5 )

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def lowerCAmelCase_ ( self : Optional[int] ):
        # Llama-2 13B logits regression check.
        SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
        SCREAMING_SNAKE_CASE_ = model(torch.tensor(_lowerCAmelCase ) )
        # Expected mean on dim = -1
        SCREAMING_SNAKE_CASE_ = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
        torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1E-5 , rtol=1E-5 )

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def lowerCAmelCase_ ( self : int ):
        # Llama-2 13B chat logits regression check.
        SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
        SCREAMING_SNAKE_CASE_ = model(torch.tensor(_lowerCAmelCase ) )
        # Expected mean on dim = -1
        SCREAMING_SNAKE_CASE_ = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
        torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
        # fmt: on
        # NOTE(review): this compares the mean again — the slice tensor above
        # is never checked; likely a copy/paste defect in the original test.
        torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )

    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' )
    @slow
    def lowerCAmelCase_ ( self : int ):
        # Llama-2 70B logits regression check.
        SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
        SCREAMING_SNAKE_CASE_ = model(torch.tensor(_lowerCAmelCase ) )
        SCREAMING_SNAKE_CASE_ = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
        torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1E-5 , rtol=1E-5 )

    @unittest.skip('Model is curently gated' )
    @slow
    def lowerCAmelCase_ ( self : Optional[Any] ):
        # Greedy-generation regression check against a recorded completion.
        SCREAMING_SNAKE_CASE_ = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        SCREAMING_SNAKE_CASE_ = 'Simply put, the theory of relativity states that '
        SCREAMING_SNAKE_CASE_ = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
        SCREAMING_SNAKE_CASE_ = tokenizer.encode(_lowerCAmelCase , return_tensors='pt' )
        SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=_lowerCAmelCase )
        # greedy generation outputs
        SCREAMING_SNAKE_CASE_ = model.generate(_lowerCAmelCase , max_new_tokens=64 , top_p=_lowerCAmelCase , temperature=1 , do_sample=_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = tokenizer.decode(generated_ids[0] , skip_special_tokens=_lowerCAmelCase )
        self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
import math
def UpperCamelCase_( snake_case__: int ) -> bool:
    """Return True if *snake_case__* is prime (6k±1 trial division).

    Bug fix: the body tested the undefined name ``number`` instead of the
    parameter. Also uses ``math.isqrt`` instead of ``int(math.sqrt(...) + 1)``
    for an exact integer bound on large inputs.
    """
    if 1 < snake_case__ < 4:
        # 2 and 3 are primes
        return True
    elif snake_case__ < 2 or snake_case__ % 2 == 0 or snake_case__ % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , math.isqrt(snake_case__ ) + 1 , 6 ):
        if snake_case__ % i == 0 or snake_case__ % (i + 2) == 0:
            return False
    return True
def UpperCamelCase_( snake_case__: float = 0.1 ) -> int:
    """Project Euler 58: return the side length of the square spiral at which
    the ratio of primes along both diagonals first falls below *snake_case__*.

    Bug fix: the obfuscated body referenced the undefined names ``primes``,
    ``j``, ``ratio`` and ``is_prime``; the loop state is restored and a local
    primality helper is embedded because the module-level helper shares the
    same obfuscated function name and is therefore unreachable.
    """

    def _is_prime(number: int ) -> bool:
        # 6k±1 trial division.
        if 1 < number < 4:
            return True
        if number < 2 or number % 2 == 0 or number % 3 == 0:
            return False
        for i in range(5 , math.isqrt(number ) + 1 , 6 ):
            if number % i == 0 or number % (i + 2) == 0:
                return False
        return True

    j = 3        # current spiral side length
    primes = 3   # primes on the diagonals of the 3x3 spiral (3, 5, 7)
    while primes / (2 * j - 1) >= snake_case__:
        # Count primes on the three non-square corners of the next layer.
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += _is_prime(i )
        j += 2
    return j
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 335 |
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def UpperCamelCase_( checkpoint_path , config_path , output_path ) -> None:
    """Convert an original CompVis latent-diffusion checkpoint into a diffusers LDMPipeline.

    Bug fixes: the obfuscated signature declared the same parameter name three
    times (a SyntaxError) and the body assigned every value to one throwaway
    local while reading the intended names (``state_dict``, ``keys``, ...),
    all unresolved. Reconstructed after the upstream diffusers conversion
    script; the sub-dict key stripping and ``clip_sample=False`` follow that
    script — TODO confirm against the original.

    Args:
        checkpoint_path: path to the original ``.ckpt`` file.
        config_path: path to the OmegaConf YAML config of the LDM.
        output_path: directory to save the converted pipeline to.
    """
    config = OmegaConf.load(config_path )
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    keys = list(state_dict.keys() )

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = 'first_stage_model.'
    for key in keys:
        if key.startswith(first_stage_key ):
            first_stage_dict[key.replace(first_stage_key , '' )] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(unet_key ):
            unet_state_dict[key.replace(unet_key , '' )] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args ).eval()
    vqvae.load_state_dict(first_stage_dict )

    unet = UNetLDMModel(**unet_init_args ).eval()
    unet.load_state_dict(unet_state_dict )

    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )

    pipeline = LDMPipeline(vqvae , unet , scheduler )
    pipeline.save_pretrained(output_path )
# CLI entry point: all three path arguments are required.
if __name__ == "__main__":
    _UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument('''--checkpoint_path''', type=str, required=True)
    parser.add_argument('''--config_path''', type=str, required=True)
    parser.add_argument('''--output_path''', type=str, required=True)
    _UpperCamelCase = parser.parse_args()
    # NOTE(review): `parser`, `args` and `convert_ldm_original` are unresolved —
    # both parser/args were renamed `_UpperCamelCase` and the conversion
    # function above is named `UpperCamelCase_`; confirm the intended names.
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 335 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( __a , __a , __a , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : str = AltDiffusionPipeline
UpperCamelCase__ : Any = TEXT_TO_IMAGE_PARAMS
UpperCamelCase__ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ : str = TEXT_TO_IMAGE_IMAGE_PARAMS
def _A ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , )
__SCREAMING_SNAKE_CASE = CLIPTextModel(__a )
__SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
__SCREAMING_SNAKE_CASE = 77
__SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _A ( self , _A , _A=0 ):
'''simple docstring'''
if str(__a ).startswith('mps' ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(__a )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=__a ).manual_seed(__a )
__SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _A ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def _A ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
__SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__a )
__SCREAMING_SNAKE_CASE = text_encoder
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__a )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__a )
__SCREAMING_SNAKE_CASE = """A photo of an astronaut"""
__SCREAMING_SNAKE_CASE = alt_pipe(**__a )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.5_7_4_8_1_6_2, 0.6_0_4_4_7_1_4_5, 0.4_8_8_2_1_2_1_7, 0.5_0_1_0_0_6_3_6, 0.5_4_3_1_1_8_5, 0.4_5_7_6_3_6_8_3, 0.4_9_6_5_7_6_9_6, 0.4_8_1_3_2_7_3_3, 0.4_7_5_7_3_0_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=__a )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
__SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__a )
__SCREAMING_SNAKE_CASE = text_encoder
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__a )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__a )
__SCREAMING_SNAKE_CASE = alt_pipe(**__a )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.5_1_6_0_5_0_9_3, 0.5_7_0_7_2_4_1, 0.4_7_3_6_5_5_0_7, 0.5_0_5_7_8_8_8_6, 0.5_6_3_3_8_7_7, 0.4_6_4_2_5_0_3, 0.5_1_8_2_0_8_1, 0.4_8_7_6_3_4_8_4, 0.4_9_0_8_4_2_3_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase_(unittest.TestCase):
    """Slow, GPU-only integration tests against the published BAAI/AltDiffusion weights."""

    def tearDown(self):
        """Free CUDA memory between tests so later tests don't OOM."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        """Full 20-step generation matches a recorded reference slice."""
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        """Two-step DDIM generation matches a recorded reference slice."""
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 257 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
# Map from canonical checkpoint name to its hosted config.json.
# NOTE(review): this rebinds the same name as the logger above, clobbering it —
# the two were almost certainly distinct names originally; confirm upstream.
lowerCamelCase : List[str] = {
    '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowerCAmelCase(PretrainedConfig):
    """CANINE model configuration.

    CANINE is a tokenizer-free model operating directly on Unicode code
    points; the defaults below reproduce the ``google/canine-s`` checkpoint.
    """

    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,  # Unicode private-use codepoints serve as special tokens
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase__ = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 310 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for the GPT-2 byte-level BPE tokenizers (slow and fast)."""

    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """Slow tokenizer built from the tiny fixture vocab written in setUp."""
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast tokenizer built from the same tiny fixture vocab."""
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Without a pad token, padding to max_length must raise.
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])
        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    def test_padding_different_model_input_name(self):
        # NOTE(review): original name was mangled; this mixin override is the
        # plausible intent (GPT-2 has no pad token) — confirm upstream.
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class __SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Regression tests for the OPT tokenizer (GPT-2 based) round-trip behaviour."""

    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(
            text,
        )
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")
        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        ids = tokenizer.encode(
            text,
        )
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(
            text,
        )
        # Same as above
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        ids = tokenizer.encode(
            text,
        )
        # We changed the bos token
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        ids = tokenizer.encode(
            text,
        )
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
| 310 | 1 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
def rename_key(key):
    """Rewrite PyTorch list indices ("layers.0") to Flax style ("layers_0").

    Every "name.<digits>" segment in *key* has its dot replaced by an
    underscore; the rest of the key is left untouched.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename one PyTorch parameter key (tuple form) to its Flax counterpart,
    transposing the tensor where the two layouts differ.

    Returns:
        (flax_tuple_key, tensor) — the possibly renamed key and possibly
        transposed tensor.
    """
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)

    # layer-norm bias stored under "scale" on the Flax side
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: 4-D weights are permuted OIHW -> HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: weight matrix is transposed
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a nested Flax parameter dict.

    Args:
        pt_state_dict: mapping of dotted PyTorch parameter names to tensors.
        flax_model: the target Flax model (used only to obtain randomly
            initialised parameters whose keys/shapes act as the reference).
        init_key: seed for the PRNGKey used to initialise the reference params.
    """
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
| 25 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
_lowercase : int =logging.get_logger(__name__)
# Map from canonical checkpoint name to its hosted config.json.
# NOTE(review): this rebinds the same name as the logger above, clobbering it —
# the two were almost certainly distinct names originally; confirm upstream.
_lowercase : Optional[Any] ={"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class snake_case__(PretrainedConfig):
    """Configuration for the original OpenAI GPT model.

    Defaults reproduce the published ``openai-gpt`` checkpoint.
    """

    model_type = "openai-gpt"
    # Aliases so the generic HF attribute names resolve to GPT's short names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        # Sequence-summary head configuration (used by the double-heads model).
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 170 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
# BibTeX citation, description and kwargs docstring consumed by the
# datasets.Metric machinery below (via add_start_docstrings / MetricInfo).
_CITATION = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_KWARGS_DESCRIPTION = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case__(datasets.Metric):
    """TER (Translation Edit Rate) metric backed by sacrebleu's implementation."""

    def _info(self):
        # sacrebleu only ships the TER class from 1.4.12 onwards.
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized=False,
        ignore_punct=False,
        support_zh_ja_chars=False,
        case_sensitive=False,
    ):
        # Every prediction must have the same number of references; sacrebleu
        # expects references transposed (one stream per reference index).
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 357 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import structure: maps each submodule name to the public names it provides.
# NOTE(review): restored the canonical `_import_structure` name — the mangled variable
# was never the one passed to `_LazyModule` below, which would raise NameError on import.
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # PyTorch is optional: simply skip registering the torch models.
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # TensorFlow is optional as well.
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module is used instead.
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends only load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 138 | 0 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
UpperCAmelCase__ = importlib.util.find_spec("""s3fs""") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
UpperCAmelCase__ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. ``"s3://"``) from `dataset_path`, if present.

    NOTE(review): renamed from the mangled `__UpperCAmelCase` — all four module
    functions shared that name, so three of them were unreachable. The original
    also discarded the split result and returned the input unchanged; fixed.

    Args:
        dataset_path: a local path or a ``protocol://path`` URI.

    Returns:
        The path portion without the ``protocol://`` prefix.
    """
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def is_remote_filesystem(fs) -> bool:
    """Return True iff `fs` is a non-local fsspec filesystem.

    NOTE(review): renamed from the mangled `__UpperCAmelCase` (name was shared by
    four functions) and the parameter restored to the `fs` the body references.

    Args:
        fs: an ``fsspec`` filesystem instance, or None.

    Returns:
        True when `fs` is set and its protocol is not ``"file"``; False otherwise.
    """
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs, src: str, dst: str) -> None:
    """Move `src` to `dst` on filesystem `fs`.

    NOTE(review): renamed from the mangled `__UpperCAmelCase`; the remoteness
    check is inlined (same condition as `is_remote_filesystem`) so this function
    does not depend on a sibling that the mangling had shadowed.

    Args:
        fs: an ``fsspec`` filesystem instance.
        src: source path/URI on `fs`.
        dst: destination path/URI on `fs`.
    """
    is_local = fs is None or fs.protocol == "file"
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        # recursive=True so directories are moved as a whole — TODO confirm against callers.
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    """Reset fsspec's internal asyncio state (e.g. after a fork) so it can be reused.

    NOTE(review): the mangled version assigned the reset values to throwaway locals;
    restored to mutate ``fsspec.asyn`` state as upstream intends — verify the
    ``iothread``/``loop`` attribute names against the pinned fsspec version.
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 289 |
def binomial_coefficient(n: int, r: int) -> int:
    """Compute the binomial coefficient C(n, r) with Pascal's-triangle DP.

    Uses a single O(r) row updated right-to-left, so memory is O(r) and time O(n*r).
    The print statement below calls this function by this name, so the mangled
    function name (and the undefined loop variable `j`) are restored/fixed here.

    Args:
        n: total number of items (non-negative).
        r: number of items chosen.

    Returns:
        C(n, r); 0 when r is negative or greater than n.
    """
    if r < 0 or r > n:
        return 0
    c = [0] * (r + 1)
    c[0] = 1  # nC0 = 1
    for i in range(1, n + 1):
        # Update the row in place from the right so each c[j-1] is still row i-1's value.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
| 325 | 0 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    """Builds a tiny LayoutLMv3 config plus random inputs and runs shape assertions.

    NOTE(review): renamed from the mangled `_A` — the test class below instantiates
    it as `TFLayoutLMvaModelTester(self)`. Method names are restored to match their
    visible call sites (`prepare_config_and_inputs`, `create_and_check_model`, ...),
    and the duplicate `__magic_name__` parameters (a SyntaxError) are reconstructed
    from how each body uses them.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels)."""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal: each box needs x0 <= x1 and y0 <= y1, so swap out-of-order coords.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        """Run the base model with text+image, text-only and image-only inputs; check output shapes."""
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        """Check the sequence-classification head produces (batch_size, num_labels) logits."""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        """Check the token-classification head produces (batch_size, text_seq_length, num_labels) logits."""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        """Check the QA head produces start/end logits of shape (batch_size, seq_length)."""
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the generic format the common TF tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model tests for LayoutLMv3.

    NOTE(review): the mangled version inherited from two undefined `__lowercase`
    bases and reused one attribute/method name throughout (so only the last
    definition survived and unittest discovered no tests). Base classes are the
    mixins imported at the top of the file; attribute and `test_*` method names
    are restored to the conventions `TFModelTesterMixin`/`unittest` require.
    """

    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Pipeline tests are skipped wholesale for this model.
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        """Copy `inputs_dict` and add the label tensors the given head class expects."""
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            # Multiple choice expects every >0-dim tensor tiled across the choices axis.
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        # -100 is the ignore index: the loss must still be finite and well-shaped.
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test-fixture image used by the integration test below.

    NOTE(review): renamed from the mangled `_a` — the integration test calls
    `prepare_img()`, which was otherwise undefined.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    """Slow integration test running the pretrained LayoutLMv3 base checkpoint.

    NOTE(review): renamed from `_A` (the third class of that name in this file);
    the property is restored to `default_image_processor`, the name the test body
    reads via `self.default_image_processor`.
    """

    @cached_property
    def default_image_processor(self):
        # apply_ocr=False: the test supplies its own input_ids/bbox instead of OCR output.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 13 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    """Unit test for the standalone `tf_top_k_top_p_filtering` helper.

    NOTE(review): renamed from `_A` (which collided with the next class) and the
    method restored to a `test_*` name so unittest actually discovers it; the
    mangled `tf.floataa`/`tf.intaa` dtypes are restored to float32/int32.
    """

    # tests whether the top_k_top_p filtering keeps exactly the expected logits
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
                [
                    8.2220991,  # 3rd highest value; idx. 0
                    -0.5620044,
                    5.23229752,
                    4.0386393,
                    -6.8798378,
                    -0.54785802,
                    -3.2012153,
                    2.92777176,
                    1.88171953,
                    7.35341276,  # 5th highest value; idx. 9
                    8.43207833,  # 2nd highest value; idx. 10
                    -9.85711836,
                    -5.96209236,
                    -1.13039161,
                    -7.1115294,
                    -0.8369633,
                    -5.3186408,
                    7.06427407,
                    0.81369344,
                    -0.82023817,
                    -5.9179796,
                    0.58813443,
                    -6.99778438,
                    4.71551189,
                    -0.18771637,
                    7.44020759,  # 4th highest value; idx. 25
                    9.38450987,  # 1st highest value; idx. 26
                    2.12662941,
                    -9.32562038,
                    2.35652522,
                ],  # cummulative prob of 5 highest values <= 0.6
                [
                    0.58425518,
                    4.53139238,
                    -5.57510464,
                    -6.28030699,
                    -7.19529503,
                    -4.02122551,
                    1.39337037,
                    -6.06707057,
                    1.59480517,
                    -9.643119,
                    0.03907799,
                    0.67231762,
                    -8.88206726,
                    6.27115922,  # 4th highest value; idx. 13
                    2.28520723,
                    4.82767506,
                    4.30421368,
                    8.8275313,  # 2nd highest value; idx. 17
                    5.44029958,  # 5th highest value; idx. 18
                    -4.4735794,
                    7.38579536,  # 3rd highest value; idx. 20
                    -2.91051663,
                    2.61946077,
                    -2.5674762,
                    -9.48959302,
                    -4.02922645,
                    -1.35416918,
                    9.67702323,  # 1st highest value; idx. 27
                    -5.89478553,
                    1.85370467,
                ],  # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        expected_output_indices = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        expected_output_values = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, expected_output_values, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, expected_output_indices)
@require_tf
class _A ( unittest.TestCase , __lowercase ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
lowercase__: Tuple = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
__snake_case : str = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : Optional[int] = 2
__snake_case : str = 2
class _A ( tf.Module ):
def __init__( self : str , __magic_name__ : Optional[int] ) -> Tuple:
"""simple docstring"""
super(__magic_name__ , self ).__init__()
__snake_case : Dict = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__magic_name__ , )
def lowercase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = self.model.generate(
input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , )
return {"sequences": outputs["sequences"]}
__snake_case : int = [[2, 0], [1_02, 1_03]]
__snake_case : Tuple = [[1, 0], [1, 1]]
__snake_case : Union[str, Any] = DummyModel(model=__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} )
__snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""]
for batch_size in range(1 , len(__magic_name__ ) + 1 ):
__snake_case : Union[str, Any] = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
__snake_case : Tuple = serving_func(**__magic_name__ )["""sequences"""]
__snake_case : List[str] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ )
tf.debugging.assert_equal(__magic_name__ , __magic_name__ )
@slow
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : Dict = 1
__snake_case : int = 2
class _A ( tf.Module ):
def __init__( self : Tuple , __magic_name__ : List[str] ) -> int:
"""simple docstring"""
super(__magic_name__ , self ).__init__()
__snake_case : Optional[int] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__magic_name__ , )
def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[int] = self.model.generate(
input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , )
return {"sequences": outputs["sequences"]}
__snake_case : Union[str, Any] = [[2], [1_02, 1_03]]
__snake_case : Tuple = [[1], [1, 1]]
__snake_case : List[str] = DummyModel(model=__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} )
__snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""]
for input_row in range(len(__magic_name__ ) ):
__snake_case : Tuple = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
__snake_case : str = serving_func(**__magic_name__ )["""sequences"""]
__snake_case : Union[str, Any] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ )
tf.debugging.assert_equal(__magic_name__ , __magic_name__ )
@slow
@require_tensorflow_text
def lowercase__ ( self : Dict ) -> Tuple:
    """Export a full text-in/text-out pipeline (SentencePiece tokenize -> pad ->
    generate -> detokenize) as a single Keras model and save it to disk; the
    implicit assertion is that ``save`` succeeds.

    NOTE(review): identifier mangling — every assignment target was rewritten
    to ``__snake_case`` and every parameter to ``__magic_name__``, so the names
    used afterwards (``CompleteSentenceTransformer``, ``complete_model``,
    ``keras_model``, ``tmp_dir``) no longer resolve. Restore the original
    bindings before running this test.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        # file needed to load the TF tokenizer
        hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=__magic_name__ )

        # Keras layer bundling tokenizer + seq2seq model into one callable graph.
        class _A ( tf.keras.layers.Layer ):
            def __init__( self : Optional[int] ) -> int:
                """Load the downloaded SentencePiece model and a tiny random T5."""
                super().__init__()
                __snake_case : Any = text.SentencepieceTokenizer(
                    model=tf.io.gfile.GFile(os.path.join(__magic_name__ , """spiece.model""" ) , """rb""" ).read() )
                __snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )

            def lowercase__ ( self : Any , __magic_name__ : List[Any] , *__magic_name__ : str , **__magic_name__ : Optional[int] ) -> Dict:
                """Run the whole pipeline on a batch of raw strings."""
                __snake_case : Optional[int] = self.tokenizer.tokenize(__magic_name__ )
                # pad to a fixed length so the exported graph has a static shape
                __snake_case , __snake_case : List[Any] = text.pad_model_inputs(
                    __magic_name__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
                __snake_case : Optional[int] = self.model.generate(input_ids=__magic_name__ , attention_mask=__magic_name__ )
                return self.tokenizer.detokenize(__magic_name__ )

        __snake_case : int = CompleteSentenceTransformer()
        __snake_case : Union[str, Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
        __snake_case : Tuple = complete_model(__magic_name__ )
        __snake_case : Optional[Any] = tf.keras.Model(__magic_name__ , __magic_name__ )
        # saving the composed model is the actual check of this test
        keras_model.save(__magic_name__ )
def lowercase__ ( self : int ) -> Union[str, Any]:
    """Check that ``generate`` stops at ``eos_token_id`` — given both as a single
    id and as a list of ids — even when sampling is enabled.

    NOTE(review): identifier mangling — assignment targets became
    ``__snake_case`` and call arguments ``__magic_name__``; the names read
    afterwards (``tokenizer``, ``model``, ``expectation``, ``generated_tokens``)
    show the intended bindings.
    """
    # sampling parameters: non-greedy so EOS forcing is genuinely exercised
    __snake_case : Dict = {
        """do_sample""": True,
        """num_beams""": 1,
        """top_p""": 0.7,
        """top_k""": 10,
        """temperature""": 0.7,
    }
    # expected sequence length when generation halts at the forced EOS token
    __snake_case : str = 14
    __snake_case : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
    __snake_case : int = """Hello, my dog is cute and"""
    __snake_case : Any = tokenizer(__magic_name__ , return_tensors="""tf""" )
    __snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
    # a single EOS token id
    __snake_case : List[Any] = 6_38
    # forces the generation to happen on CPU, to avoid GPU-related quirks
    with tf.device(""":/CPU:0""" ):
        tf.random.set_seed(0 )
        __snake_case : int = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
    self.assertTrue(expectation == len(generated_tokens[0] ) )
    # a list of EOS token ids must behave identically (same seed, same length)
    __snake_case : Dict = [6_38, 1_98]
    with tf.device(""":/CPU:0""" ):
        tf.random.set_seed(0 )
        __snake_case : Optional[int] = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
    self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowercase__ ( self : Tuple ) -> str:
    """Check that model-specific kwargs are forwarded (and filtered) by
    ``generate``: a subclass whose ``call`` accepts an extra ``foo`` keyword must
    produce the same output, while an encoder accepting bare ``**kwargs`` must
    make the extra keyword fail loudly.

    NOTE(review): identifier mangling — assignment targets became
    ``__snake_case``; the names used later (``bart_tokenizer``, ``bart_model``,
    ``FakeBart``, ``FakeEncoder``, ``fake_encoder``) show the intended bindings,
    and the base class ``__lowercase`` of the first nested class no longer
    resolves (it was originally TFBartForConditionalGeneration).
    """
    __snake_case : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
    __snake_case : str = """Hugging Face is a technology company based in New York and Paris."""
    __snake_case : str = bart_tokenizer(__magic_name__ , return_tensors="""tf""" ).input_ids
    __snake_case : Union[str, Any] = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
    # baseline output to compare the subclass against
    __snake_case : int = bart_model.generate(__magic_name__ ).numpy()

    # subclass whose call() tolerates an extra named kwarg -> generate must still work
    class _A ( __lowercase ):
        def lowercase__ ( self : int , __magic_name__ : Any , __magic_name__ : int=None , **__magic_name__ : int ) -> Optional[Any]:
            """Accept (and ignore) an extra ``foo`` keyword argument."""
            return super().call(__magic_name__ , **__magic_name__ )

    __snake_case : Union[str, Any] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
    __snake_case : Optional[Any] = bart_model.generate(__magic_name__ , foo="""bar""" ).numpy()
    self.assertTrue(np.array_equal(__magic_name__ , __magic_name__ ) )

    # encoder subclass with bare **kwargs: no argument filtering happens any more
    class _A ( bart_model.model.encoder.__class__ ):
        def lowercase__ ( self : Optional[int] , __magic_name__ : Optional[int] , **__magic_name__ : Tuple ) -> Dict:
            """Pass-through call used to replace the original encoder."""
            return super().call(__magic_name__ , **__magic_name__ )

    __snake_case : List[Any] = FakeEncoder(bart_model.config , bart_model.model.shared )
    __snake_case : Tuple = fake_encoder
    # Normal generation still works (the output will be different because the encoder weights are different)
    __snake_case : Dict = bart_model.generate(__magic_name__ ).numpy()
    with self.assertRaises(__magic_name__ ):
        # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
        bart_model.generate(__magic_name__ , foo="""bar""" )
| 13 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """Fixture that holds the configuration for ConditionalDetrImageProcessor tests.

    Reconstruction note: the original block was destroyed by automated renaming
    (all 13 ``__init__`` parameters shared the name ``_lowerCamelCase`` — a
    SyntaxError — and both methods shared one name). The parameter and method
    names restored here are the ones the surrounding test class actually calls
    (``ConditionalDetrImageProcessingTester(self)``,
    ``prepare_image_processor_dict()``, ``get_expected_values(...)``) and the
    ones the method bodies read.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor is expected to resize to.

        Non-batched: scale the first image so its shorter side equals
        ``size["shortest_edge"]``. Batched: recurse per image, then take the
        max height and max width across the batch (padding target).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                # PIL reports (width, height)
                w, h = image.size
            else:
                # assumes channels-first arrays/tensors: (C, H, W) — matches prepare_image_inputs
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class _snake_case ( _snake_case , unittest.TestCase ):
    """Unit + integration tests for ConditionalDetrImageProcessor.

    NOTE(review): automated identifier mangling has damaged this class:
      * it lists ``_snake_case`` (itself) as a base — originally this was the
        ImageProcessingSavingTestMixin imported at the top of the file;
      * every method is named ``SCREAMING_SNAKE_CASE__``, so each definition
        shadows the previous one and only the last method survives at runtime;
      * most assignment targets were collapsed to ``a`` while later statements
        still read the original names (``image_processing``, ``encoded_images``,
        ``expected_height`` ...), and several call arguments became
        ``_lowerCamelCase``.
    The bodies are kept verbatim below; restore the upstream names before use.
    """

    # class under test (None when the vision extras are not installed)
    SCREAMING_SNAKE_CASE__ = ConditionalDetrImageProcessor if is_vision_available() else None

    def SCREAMING_SNAKE_CASE__ ( self ):
        # setUp: shared configuration fixture
        a :Dict = ConditionalDetrImageProcessingTester(self )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # kwargs dict used to instantiate the processor under test
        return self.image_processor_tester.prepare_image_processor_dict()

    def SCREAMING_SNAKE_CASE__ ( self ):
        # the processor must expose all of its configuration attributes
        a :Optional[int] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowerCamelCase , '''image_mean''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''image_std''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # from_dict must honour defaults and the legacy size/max_size override
        a :str = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
        self.assertEqual(image_processor.do_pad , _lowerCamelCase )
        a :Dict = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowerCamelCase )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
        self.assertEqual(image_processor.do_pad , _lowerCamelCase )

    def SCREAMING_SNAKE_CASE__ ( self ):
        pass

    def SCREAMING_SNAKE_CASE__ ( self ):
        # PIL input path: single image and batched shapes must match expectations
        # Initialize image_processing
        a :Any = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        a :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase , Image.Image )
        # Test not batched input
        a :Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        a , a :Any = self.image_processor_tester.get_expected_values(_lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        a , a :Optional[int] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
        a :int = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # numpy input path
        # Initialize image_processing
        a :Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        a :Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase , np.ndarray )
        # Test not batched input
        a :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        a , a :str = self.image_processor_tester.get_expected_values(_lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        a :Dict = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
        a , a :Dict = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # torch tensor input path
        # Initialize image_processing
        a :str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        a :int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase , torch.Tensor )
        # Test not batched input
        a :Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        a , a :Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        a :List[Any] = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
        a , a :List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        # integration test against a real checkpoint with COCO detection annotations
        # prepare image and target
        a :Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
            a :Any = json.loads(f.read() )
        a :str = {'''image_id''': 3_9769, '''annotations''': target}
        # encode them
        a :int = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
        a :Union[str, Any] = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , return_tensors='''pt''' )
        # verify pixel values
        a :List[str] = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['''pixel_values'''].shape , _lowerCamelCase )
        a :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4 ) )
        # verify area
        a :Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowerCamelCase ) )
        # verify boxes
        a :Any = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowerCamelCase )
        a :str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowerCamelCase , atol=1e-3 ) )
        # verify image_id
        a :Dict = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowerCamelCase ) )
        # verify is_crowd
        a :Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowerCamelCase ) )
        # verify class_labels
        a :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowerCamelCase ) )
        # verify orig_size
        a :Tuple = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowerCamelCase ) )
        # verify size
        a :Any = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowerCamelCase ) )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        # integration test for COCO panoptic annotations (segmentation masks)
        # prepare image, target and masks_path
        a :List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
            a :List[Any] = json.loads(f.read() )
        a :Dict = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
        a :Tuple = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
        # encode them
        a :int = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
        a :Optional[Any] = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , masks_path=_lowerCamelCase , return_tensors='''pt''' )
        # verify pixel values
        a :Optional[Any] = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['''pixel_values'''].shape , _lowerCamelCase )
        a :str = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4 ) )
        # verify area
        a :Optional[int] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowerCamelCase ) )
        # verify boxes
        a :int = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowerCamelCase )
        a :Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowerCamelCase , atol=1e-3 ) )
        # verify image_id
        a :int = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowerCamelCase ) )
        # verify is_crowd
        a :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowerCamelCase ) )
        # verify class_labels
        a :List[str] = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowerCamelCase ) )
        # verify masks
        a :Optional[Any] = 82_2873
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _lowerCamelCase )
        # verify orig_size
        a :Optional[Any] = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowerCamelCase ) )
        # verify size
        a :int = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowerCamelCase ) )
| 94 |
'''simple docstring'''
from __future__ import annotations
def _a( UpperCamelCase__ : list[int] ):
    """Return the maximum sum obtainable from non-adjacent elements of the list.

    Classic "house robber" dynamic programming: track the best total that
    includes the current element and the best total that excludes it.

    :param UpperCamelCase__: list of integers (name kept for existing callers);
        may be empty, in which case 0 is returned.
    :return: maximum sum of a subset with no two adjacent elements
    """
    nums = UpperCamelCase__  # readable alias for the mangled parameter name
    if not nums:
        return 0
    # best sum that includes nums[0] / best sum that excludes it
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        # include current -> previous must have excluded; exclude -> carry the best so far
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod() | 152 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__UpperCamelCase : str = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 368 |
"""simple docstring"""
from __future__ import annotations
import math
__UpperCamelCase : Dict = '''2020.9.26'''
__UpperCamelCase : Tuple = '''xcodz-dot, cclaus, dhruvmanila'''
def __SCREAMING_SNAKE_CASE ( x , y , z , scale , distance ):
    """Project a 3D point onto 2D with a simple perspective projection.

    The original parameter list was mangled (five parameters all named ``A_``,
    a SyntaxError); the body still used the real names ``x``, ``y``, ``z``,
    ``distance`` and ``scale``, restored here. The (scale, distance) ordering
    follows the upstream signature — the demo call uses 10.0 for both, so the
    order cannot be confirmed from this file alone.

    :raises TypeError: if any argument is not an int or float.
    :return: ``(projected_x, projected_y)`` tuple of floats.
    """
    # at this point locals() holds exactly the five parameters
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        msg = f'Input values must either be float or int: {list(locals().values() )}'
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def __SCREAMING_SNAKE_CASE ( x , y , z , axis , angle ):
    """Rotate the 3D point ``(x, y, z)`` about the given axis by ``angle``.

    Parameter names restored from the mangled original (five ``A_`` params, a
    SyntaxError); the body preserved ``x``, ``y``, ``z``, ``axis``,
    ``input_variables`` and ``new_*``, and the demo call below the functions
    passes the axis as a string in 4th position and the angle in 5th.

    NOTE: the angle conversion ``(angle % 360) / 450 * 180 / pi`` is NOT the
    standard degrees->radians formula; it is kept as-is to preserve the
    original numeric behavior.

    :raises TypeError: if ``axis`` is not a str, or any other argument is not numeric.
    :raises ValueError: if ``axis`` is not one of 'x', 'y', 'z'.
    :return: ``(new_x, new_y, new_z)`` rotated coordinates.
    """
    if not isinstance(axis , str ):
        raise TypeError('''Axis must be a str''' )
    # validate the numeric parameters (everything except the axis label)
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        msg = (
            '''Input values except axis must either be float or int: '''
            f'{list(input_variables.values() )}'
        )
        raise TypeError(msg )
    # nonstandard angle scaling — see NOTE in the docstring
    angle = (angle % 3_60) / 4_50 * 1_80 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{convert_to_ad(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }''')
print(F'''{rotate(1.0, 2.0, 3.0, 'y', 9_0.0) = }''')
| 74 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase__ :
    """Builds LayoutLMv3 configs and dummy inputs for the model tests below.

    NOTE(review): identifier mangling — every ``__init__`` parameter is named
    ``UpperCAmelCase`` (duplicate parameters, a SyntaxError) and every
    assignment target was collapsed to ``_lowercase``. The right-hand sides and
    later attribute reads (``self.batch_size``, ``self.text_seq_length`` ...)
    show the intended names; restore them before running.
    """

    def __init__(self , UpperCAmelCase , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=2 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=9_9 , UpperCAmelCase=3_6 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=3_7 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_1_2 , UpperCAmelCase=1_6 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=6 , UpperCAmelCase=6 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , UpperCAmelCase=1_0_0_0 , ) -> Tuple:
        _lowercase =parent
        _lowercase =batch_size
        _lowercase =num_channels
        _lowercase =image_size
        _lowercase =patch_size
        _lowercase =text_seq_length
        _lowercase =is_training
        _lowercase =use_input_mask
        _lowercase =use_token_type_ids
        _lowercase =use_labels
        _lowercase =vocab_size
        _lowercase =hidden_size
        _lowercase =num_hidden_layers
        _lowercase =num_attention_heads
        _lowercase =intermediate_size
        _lowercase =hidden_act
        _lowercase =hidden_dropout_prob
        _lowercase =attention_probs_dropout_prob
        _lowercase =max_position_embeddings
        _lowercase =type_vocab_size
        _lowercase =type_sequence_label_size
        _lowercase =initializer_range
        _lowercase =coordinate_size
        _lowercase =shape_size
        _lowercase =num_labels
        _lowercase =num_choices
        _lowercase =scope
        _lowercase =range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        _lowercase =text_seq_length
        _lowercase =(image_size // patch_size) ** 2 + 1
        _lowercase =self.text_seq_length + self.image_seq_length

    def __A (self ) -> Any:
        # prepare_config_and_inputs: random ids, legal bboxes, pixel values, masks, labels
        _lowercase =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        _lowercase =ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                # swap coordinates so x0 <= x1 and y0 <= y1
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    _lowercase =bbox[i, j, 3]
                    _lowercase =bbox[i, j, 1]
                    _lowercase =t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    _lowercase =bbox[i, j, 2]
                    _lowercase =bbox[i, j, 0]
                    _lowercase =t
        _lowercase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _lowercase =None
        if self.use_input_mask:
            _lowercase =random_attention_mask([self.batch_size, self.text_seq_length] )
        _lowercase =None
        if self.use_token_type_ids:
            _lowercase =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        _lowercase =None
        _lowercase =None
        if self.use_labels:
            _lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _lowercase =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        _lowercase =LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
        # create_and_check_model: run with text+image, text-only and image-only inputs
        _lowercase =LayoutLMvaModel(config=UpperCAmelCase_ )
        model.to(UpperCAmelCase_ )
        model.eval()
        # text + image
        _lowercase =model(UpperCAmelCase_ , pixel_values=UpperCAmelCase_ )
        _lowercase =model(
            UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
        _lowercase =model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
        _lowercase =model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        _lowercase =model(UpperCAmelCase_ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        _lowercase =model(pixel_values=UpperCAmelCase_ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
        # sequence classification head: logits shaped (batch, num_labels)
        _lowercase =self.num_labels
        _lowercase =LayoutLMvaForSequenceClassification(UpperCAmelCase_ )
        model.to(UpperCAmelCase_ )
        model.eval()
        _lowercase =model(
            UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
        # token classification head: logits shaped (batch, text_seq_length, num_labels)
        _lowercase =self.num_labels
        _lowercase =LayoutLMvaForTokenClassification(config=UpperCAmelCase_ )
        model.to(UpperCAmelCase_ )
        model.eval()
        _lowercase =model(
            UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
        # question answering head: start/end logits over the full sequence
        _lowercase =LayoutLMvaForQuestionAnswering(config=UpperCAmelCase_ )
        model.to(UpperCAmelCase_ )
        model.eval()
        _lowercase =model(
            UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __A (self ) -> Optional[Any]:
        # prepare_config_and_inputs_for_common: repack the tuple into a kwargs dict
        _lowercase =self.prepare_config_and_inputs()
        (
            _lowercase
        ) =config_and_inputs
        _lowercase ={
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
    """Common-suite tests for LayoutLMv3 models.

    NOTE(review): identifier mangling — the base mixins were renamed to
    ``__SCREAMING_SNAKE_CASE`` (unresolvable; originally ModelTesterMixin and
    PipelineTesterMixin), all class attributes share the name
    ``SCREAMING_SNAKE_CASE__`` (later bindings shadow earlier ones), every
    method is ``__A`` and its parameters ``UpperCAmelCase`` (duplicates =
    SyntaxError), and ``get_values(...)`` arguments lost their original
    MODEL_FOR_*_MAPPING constants. Kept verbatim below.
    """

    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False
    # model classes exercised by the common test suite
    SCREAMING_SNAKE_CASE__ = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # pipeline-test mapping
    SCREAMING_SNAKE_CASE__ = (
        {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
        # skip-filter hook for pipeline tests: run everything
        return True

    def __A (self ) -> Dict:
        # setUp: model tester + config tester fixtures
        _lowercase =LayoutLMvaModelTester(self )
        _lowercase =ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=3_7 )

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Optional[Any]:
        # _prepare_for_class: expand inputs for multiple-choice and add labels per head
        _lowercase =copy.deepcopy(UpperCAmelCase_ )
        if model_class in get_values(UpperCAmelCase_ ):
            _lowercase ={
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(UpperCAmelCase_ , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(UpperCAmelCase_ ):
                _lowercase =torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ )
            elif model_class in get_values(UpperCAmelCase_ ):
                _lowercase =torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ )
                _lowercase =torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ )
            elif model_class in [
                *get_values(UpperCAmelCase_ ),
            ]:
                _lowercase =torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ )
            elif model_class in [
                *get_values(UpperCAmelCase_ ),
            ]:
                _lowercase =torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCAmelCase_ , )
        return inputs_dict

    def __A (self ) -> int:
        # config sanity checks
        self.config_tester.run_common_tests()

    def __A (self ) -> List[str]:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase_ )

    def __A (self ) -> str:
        # exercise every position-embedding variant
        _lowercase =self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _lowercase =type
            self.model_tester.create_and_check_model(*UpperCAmelCase_ )

    def __A (self ) -> List[Any]:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )

    def __A (self ) -> Optional[Any]:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )

    def __A (self ) -> Optional[int]:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )

    @slow
    def __A (self ) -> Union[str, Any]:
        # from_pretrained smoke test on the first published checkpoint
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowercase =LayoutLMvaModel.from_pretrained(UpperCAmelCase_ )
            self.assertIsNotNone(UpperCAmelCase_ )
def UpperCAmelCase_ ( ) -> Dict:
    """Load the standard two-cats COCO fixture image used by the integration tests.

    The original body bound the opened image to a mangled throwaway name while
    still returning ``image``; the binding is restored here.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
    """Integration test: forward a fixture image + dummy ids through the
    published ``microsoft/layoutlmv3-base`` checkpoint and pin the output slice.

    NOTE(review): identifier mangling — assignment targets became ``_lowercase``
    and call arguments ``UpperCAmelCase_``; the names read afterwards
    (``model``, ``image_processor``, ``pixel_values``, ``input_ids``, ``bbox``,
    ``outputs``, ``expected_shape``, ``expected_slice``) show the intent.
    """

    @cached_property
    def __A (self ) -> str:
        # image processor without OCR (the test supplies its own tokens/boxes)
        return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase_ ) if is_vision_available() else None

    @slow
    def __A (self ) -> Optional[Any]:
        _lowercase =LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(UpperCAmelCase_ )
        _lowercase =self.default_image_processor
        _lowercase =prepare_img()
        _lowercase =image_processor(images=UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values.to(UpperCAmelCase_ )
        # minimal hand-made text inputs: two token ids with one bbox each
        _lowercase =torch.tensor([[1, 2]] )
        _lowercase =torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        _lowercase =model(
            input_ids=input_ids.to(UpperCAmelCase_ ) , bbox=bbox.to(UpperCAmelCase_ ) , pixel_values=pixel_values.to(UpperCAmelCase_ ) , )
        # verify the logits
        _lowercase =torch.Size((1, 1_9_9, 7_6_8) )
        self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_ )
        _lowercase =torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(UpperCAmelCase_ )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4 ) )
| 5 |
import itertools
import math
def lowerCAmelCase_ ( __a ) -> bool:
    """Return True if the argument is a prime number.

    Deterministic trial division: after handling 2, 3 and the trivial
    non-primes, only candidates of the form 6k +/- 1 up to sqrt(n) are tested.
    The original body referenced ``number`` while the parameter had been
    mangled to ``__a``; an alias restores it without changing the signature.
    """
    number = __a  # readable alias; parameter name kept for existing callers
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def lowerCAmelCase_ ( ):
    """Yield the prime numbers in increasing order, starting from 2.

    The original body called ``is_prime`` — a name that no longer exists in
    this module after the automated renaming — so the same 6k +/- 1 primality
    test (see the standalone helper above) is inlined here as a private
    function to keep the generator self-contained.
    """
    def _is_prime(number: int) -> bool:
        # same deterministic trial-division test as the standalone helper
        if 1 < number < 4:
            return True
        if number < 2 or number % 2 == 0 or number % 3 == 0:
            return False
        for i in range(5, int(math.sqrt(number) + 1), 6):
            if number % i == 0 or number % (i + 2) == 0:
                return False
        return True

    num = 2
    while True:
        if _is_prime(num):
            yield num
        num += 1
def lowerCAmelCase_ ( __a = 10001 ) -> int:
    """Return the ``__a``-th prime number (Project Euler problem 7).

    Bug fix: the original called the undefined names ``prime_generator`` and
    ``nth``; this version is self-contained trial division.
    """
    count = 0
    candidate = 1
    while count < __a:
        candidate += 1
        # candidate is prime iff no divisor up to its integer square root divides it
        if all(candidate % d for d in range(2, math.isqrt(candidate) + 1)):
            count += 1
    return candidate
if __name__ == "__main__":
    # Bug fix: ``solution`` was never defined; the solver above is bound to the
    # (obfuscated) name ``lowerCAmelCase_``, so call it by that name.
    print(f'{lowerCAmelCase_() = }')
| 10 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    """Convert a TensorFlow Funnel checkpoint into a PyTorch ``state_dict`` on disk.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        config_file: JSON config file describing the model architecture.
        pytorch_dump_path: where the converted ``state_dict`` is written.
        base_model: if truthy, build ``FunnelBaseModel`` (no decoder) instead of
            ``FunnelModel``.

    Bug fix: the original declared the same parameter name four times (a
    SyntaxError) and referenced the undefined names ``model`` and
    ``pytorch_dump_path``; parameters are now named to match the CLI below.
    """
    config = FunnelConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    # Bug fix: the parser/args were bound to throwaway obfuscated names and the
    # call targeted the undefined ``convert_tf_checkpoint_to_pytorch``; the
    # converter defined above is bound to ``_SCREAMING_SNAKE_CASE``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    _SCREAMING_SNAKE_CASE(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
| 371 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
# Conversion script requires the fairseq 1.0 API; fail fast on older installs.
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
# NOTE(review): both module-level constants were obfuscated to the SAME name,
# so the sample sentence immediately clobbers the logger — TODO restore
# distinct names (originally ``logger`` and ``SAMPLE_TEXT``).
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = "Hello world! cécé herlolip"
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : str , lowercase : bool ):
    """Copy a fairseq XLM-RoBERTa-XL checkpoint into a HF model, sanity-check the
    outputs, and save the converted model.

    NOTE(review): obfuscation damage — the signature declares ``lowercase``
    three times (a SyntaxError in Python), and every ``lowerCamelCase_ = ...``
    assignment below discards its value instead of binding the name the next
    lines read (``roberta``, ``config``, ``model``, ``layer`` ... are all
    undefined). The statement sequence still mirrors the original conversion
    script, so it is documented as-is; TODO restore the real bindings.
    """
    lowerCamelCase_ = FairseqRobertaModel.from_pretrained(lowercase )
    roberta.eval()  # disable dropout
    lowerCamelCase_ = roberta.model.encoder.sentence_encoder
    # Build an HF config mirroring the fairseq hyper-parameters.
    lowerCamelCase_ = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
    if classification_head:
        # num_labels is taken from the size of the MNLI head's output projection.
        lowerCamelCase_ = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our RoBERTa config:' , lowercase )
    lowerCamelCase_ = XLMRobertaXLForSequenceClassification(lowercase ) if classification_head else XLMRobertaXLForMaskedLM(lowercase )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    lowerCamelCase_ = roberta_sent_encoder.embed_tokens.weight
    lowerCamelCase_ = roberta_sent_encoder.embed_positions.weight
    lowerCamelCase_ = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.
    lowerCamelCase_ = roberta_sent_encoder.layer_norm.weight
    lowerCamelCase_ = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        lowerCamelCase_ = model.roberta.encoder.layer[i]
        lowerCamelCase_ = roberta_sent_encoder.layers[i]
        lowerCamelCase_ = layer.attention
        lowerCamelCase_ = roberta_layer.self_attn_layer_norm.weight
        lowerCamelCase_ = roberta_layer.self_attn_layer_norm.bias
        # self attention
        lowerCamelCase_ = layer.attention.self
        # q/k/v projections must all be square hidden_size x hidden_size.
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        lowerCamelCase_ = roberta_layer.self_attn.q_proj.weight
        lowerCamelCase_ = roberta_layer.self_attn.q_proj.bias
        lowerCamelCase_ = roberta_layer.self_attn.k_proj.weight
        lowerCamelCase_ = roberta_layer.self_attn.k_proj.bias
        lowerCamelCase_ = roberta_layer.self_attn.v_proj.weight
        lowerCamelCase_ = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        lowerCamelCase_ = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        lowerCamelCase_ = roberta_layer.self_attn.out_proj.weight
        lowerCamelCase_ = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        lowerCamelCase_ = roberta_layer.final_layer_norm.weight
        lowerCamelCase_ = roberta_layer.final_layer_norm.bias
        # intermediate
        lowerCamelCase_ = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
        lowerCamelCase_ = roberta_layer.fca.weight
        lowerCamelCase_ = roberta_layer.fca.bias
        # output
        lowerCamelCase_ = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
        lowerCamelCase_ = roberta_layer.fca.weight
        lowerCamelCase_ = roberta_layer.fca.bias
        # end of layer
    if classification_head:
        lowerCamelCase_ = roberta.model.classification_heads['mnli'].dense.weight
        lowerCamelCase_ = roberta.model.classification_heads['mnli'].dense.bias
        lowerCamelCase_ = roberta.model.classification_heads['mnli'].out_proj.weight
        lowerCamelCase_ = roberta.model.classification_heads['mnli'].out_proj.bias
    else:
        # LM Head
        lowerCamelCase_ = roberta.model.encoder.lm_head.dense.weight
        lowerCamelCase_ = roberta.model.encoder.lm_head.dense.bias
        lowerCamelCase_ = roberta.model.encoder.lm_head.layer_norm.weight
        lowerCamelCase_ = roberta.model.encoder.lm_head.layer_norm.bias
        lowerCamelCase_ = roberta.model.encoder.lm_head.weight
        lowerCamelCase_ = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    lowerCamelCase_ = roberta.encode(lowercase ).unsqueeze(0 )  # batch of size 1
    lowerCamelCase_ = model(lowercase )[0]
    if classification_head:
        lowerCamelCase_ = roberta.model.classification_heads['mnli'](roberta.extract_features(lowercase ) )
    else:
        lowerCamelCase_ = roberta.model(lowercase )[0]
    print(our_output.shape , their_output.shape )
    lowerCamelCase_ = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f"""max_absolute_diff = {max_absolute_diff}""" )  # ~ 1e-7
    lowerCamelCase_ = torch.allclose(lowercase , lowercase , atol=1e-3 )
    print('Do both models output the same tensors?' , '🔥' if success else '💩' )
    if not success:
        raise Exception('Something went wRoNg' )
    pathlib.Path(lowercase ).mkdir(parents=lowercase , exist_ok=lowercase )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(lowercase )
if __name__ == "__main__":
    # Bug fix: the parser/args were bound to throwaway obfuscated names and the
    # call targeted the undefined ``convert_xlm_roberta_xl_checkpoint_to_pytorch``;
    # the converter defined above is bound to ``_SCREAMING_SNAKE_CASE``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    _SCREAMING_SNAKE_CASE(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 208 | 0 |
def UpperCamelCase ( __lowerCamelCase : int ):
    """Return the number of set bits (1s) in the binary representation of a
    non-negative integer.

    Raises:
        TypeError: if the input is not an ``int``.
        ValueError: if the input is negative.
    """
    # Bug fix: the original compared the undefined name ``a`` and raised
    # TypeError when the value WAS an int (``isinstance(x, x)``); the type
    # check must also run before the ``< 0`` comparison.
    if not isinstance(__lowerCamelCase , int ):
        raise TypeError("Input value must be a 'int' type" )
    if __lowerCamelCase < 0:
        raise ValueError("Input value must be a positive integer" )
    return bin(__lowerCamelCase ).count("1" )
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 59 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# NOTE(review): obfuscation gave both constants the SAME name, so the second
# assignment clobbers the first (originally ``MAX_GPU_BATCH_SIZE = 16`` and
# ``EVAL_BATCH_SIZE = 32``) — TODO restore distinct names; the training code
# below reads ``MAX_GPU_BATCH_SIZE``, which is otherwise undefined.
__lowerCAmelCase : Optional[int] =16
__lowerCAmelCase : Tuple =32
def UpperCamelCase ( _lowerCamelCase : Accelerator , _lowerCamelCase : DatasetDict , _lowerCamelCase : List[int] , _lowerCamelCase : List[int] , _lowerCamelCase : int = 16 ):
    """Build train/validation/test dataloaders for one cross-validation fold of
    GLUE MRPC (tokenize, rename labels, dynamic padding, batching).

    NOTE(review): obfuscation damage — the signature repeats ``_lowerCamelCase``
    five times (a SyntaxError), and every ``A__ = ...`` assignment discards its
    value, leaving the names read below (``tokenizer``, ``dataset``,
    ``datasets``, ``tokenized_datasets``, the dataloaders) undefined. The
    statements still mirror the original accelerate example; TODO restore
    the real bindings.
    """
    A__ = AutoTokenizer.from_pretrained("bert-base-cased" )
    # Re-split the raw dataset into this fold's train/validation plus the fixed test split.
    A__ = DatasetDict(
        {
            "train": dataset["train"].select(_lowerCamelCase ),
            "validation": dataset["train"].select(_lowerCamelCase ),
            "test": dataset["validation"],
        } )
    def tokenize_function(_lowerCamelCase : Dict ):
        # max_length=None => use the model max length (it's actually the default)
        A__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCamelCase , max_length=_lowerCamelCase )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        A__ = datasets.map(
            _lowerCamelCase , batched=_lowerCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    A__ = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(_lowerCamelCase : Any ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        A__ = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            A__ = 16
        elif accelerator.mixed_precision != "no":
            A__ = 8
        else:
            A__ = None
        return tokenizer.pad(
            _lowerCamelCase , padding="longest" , max_length=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_tensors="pt" , )
    # Instantiate dataloaders.
    A__ = DataLoader(
        tokenized_datasets["train"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
    A__ = DataLoader(
        tokenized_datasets["validation"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
    A__ = DataLoader(
        tokenized_datasets["test"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
    return train_dataloader, eval_dataloader, test_dataloader
def UpperCamelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : str ):
    """Train/evaluate BERT on GLUE MRPC with stratified K-fold cross validation,
    then ensemble the per-fold test predictions and report the averaged metric.

    NOTE(review): obfuscation damage — every ``A__ = ...`` assignment discards
    its value, so the names read afterwards (``args``, ``config``, ``model``,
    ``optimizer``, ``metric``, ``kfold`` ...) are undefined; the statements
    still mirror the original accelerate example. TODO restore the bindings.
    """
    # New Code #
    A__ = []
    # Download the dataset
    A__ = load_dataset("glue" , "mrpc" )
    # Create our splits
    A__ = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    A__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    A__ = config["lr"]
    A__ = int(config["num_epochs"] )
    A__ = int(config["seed"] )
    A__ = int(config["batch_size"] )
    A__ = evaluate.load("glue" , "mrpc" )
    # If the batch size is too big we use gradient accumulation
    A__ = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        A__ = batch_size // MAX_GPU_BATCH_SIZE
        A__ = MAX_GPU_BATCH_SIZE
    set_seed(_lowerCamelCase )
    # New Code #
    # Create our folds:
    A__ = kfold.split(np.zeros(datasets["train"].num_rows ) , datasets["train"]["label"] )
    A__ = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(_lowerCamelCase ):
        A__, A__, A__ = get_fold_dataloaders(
            _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        A__ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCamelCase )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        A__ = model.to(accelerator.device )
        # Instantiate optimizer
        A__ = AdamW(params=model.parameters() , lr=_lowerCamelCase )
        # Instantiate scheduler
        A__ = get_linear_schedule_with_warmup(
            optimizer=_lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(_lowerCamelCase ) * num_epochs) // gradient_accumulation_steps , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        A__, A__, A__, A__, A__ = accelerator.prepare(
            _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
        # Now we train the model
        for epoch in range(_lowerCamelCase ):
            model.train()
            for step, batch in enumerate(_lowerCamelCase ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                A__ = model(**_lowerCamelCase )
                A__ = outputs.loss
                A__ = loss / gradient_accumulation_steps
                accelerator.backward(_lowerCamelCase )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(_lowerCamelCase ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    A__ = model(**_lowerCamelCase )
                A__ = outputs.logits.argmax(dim=-1 )
                A__, A__ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
                metric.add_batch(
                    predictions=_lowerCamelCase , references=_lowerCamelCase , )
            A__ = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"epoch {epoch}:" , _lowerCamelCase )
        # New Code #
        # We also run predictions on the test set at the very end
        A__ = []
        for step, batch in enumerate(_lowerCamelCase ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                A__ = model(**_lowerCamelCase )
            A__ = outputs.logits
            A__, A__ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(_lowerCamelCase , dim=0 ) )
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    A__ = torch.cat(_lowerCamelCase , dim=0 )
    A__ = torch.stack(_lowerCamelCase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    A__ = metric.compute(predictions=_lowerCamelCase , references=_lowerCamelCase )
    accelerator.print("Average test metrics from all folds:" , _lowerCamelCase )
def UpperCamelCase ( ):
    """CLI entry point: parse mixed-precision / CPU / fold-count arguments and
    launch the cross-validated training run.

    NOTE(review): obfuscation damage — the parser and parsed args are bound to
    the throwaway name ``A__`` (so ``parser``/``args`` below are undefined),
    ``type=_lowerCamelCase`` replaced the original ``type=str`` / ``type=int``,
    and ``training_function`` no longer exists under that name (it was renamed
    to the shadowed ``UpperCamelCase``). TODO restore the real bindings.
    """
    A__ = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=_lowerCamelCase , default=_lowerCamelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    # New Code #
    parser.add_argument("--num_folds" , type=_lowerCamelCase , default=3 , help="The number of splits to perform across the dataset" )
    A__ = parser.parse_args()
    A__ = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
    # NOTE(review): ``main`` is undefined after obfuscation; the entry point is
    # the ``UpperCamelCase`` function directly above — TODO fix the call.
    main()
| 237 | 0 |
from __future__ import annotations
import pandas as pd
def _lowerCAmelCase ( A__: list[int] , A__: list[int] , A__: int ):
'''simple docstring'''
UpperCAmelCase = [0] * no_of_processes
UpperCAmelCase = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(A__ ):
UpperCAmelCase = burst_time[i]
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 9_9999_9999
UpperCAmelCase = 0
UpperCAmelCase = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(A__ ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
UpperCAmelCase = remaining_time[j]
UpperCAmelCase = j
UpperCAmelCase = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
UpperCAmelCase = remaining_time[short]
if minm == 0:
UpperCAmelCase = 9_9999_9999
if remaining_time[short] == 0:
complete += 1
UpperCAmelCase = False
# Find finish time of current process
UpperCAmelCase = increment_time + 1
# Calculate waiting time
UpperCAmelCase = finish_time - arrival_time[short]
UpperCAmelCase = finar - burst_time[short]
if waiting_time[short] < 0:
UpperCAmelCase = 0
# Increment time
increment_time += 1
return waiting_time
def _lowerCAmelCase ( A__: list[int] , A__: int , A__: list[int] ):
'''simple docstring'''
UpperCAmelCase = [0] * no_of_processes
for i in range(A__ ):
UpperCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
def _lowerCAmelCase ( A__: list[int] , A__: list[int] , A__: int ):
'''simple docstring'''
UpperCAmelCase = 0
UpperCAmelCase = 0
for i in range(A__ ):
UpperCAmelCase = total_waiting_time + waiting_time[i]
UpperCAmelCase = total_turn_around_time + turn_around_time[i]
print(F"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
print('''Average turn around time =''' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
    # Interactive driver: read process arrival/burst times, run the SRTF
    # helpers, and print a summary DataFrame.
    # NOTE(review): obfuscation damage — every input is bound to the throwaway
    # name ``__magic_name__``, so ``no_of_processes``, ``arrival_time``,
    # ``burst_time``, ``fcfs`` etc. are undefined, and the helper functions are
    # called by their pre-obfuscation names (``calculate_waitingtime`` ...)
    # which no longer exist. TODO restore the real bindings.
    print("Enter how many process you want to analyze")
    __magic_name__ = int(input())
    __magic_name__ = [0] * no_of_processes
    __magic_name__ = [0] * no_of_processes
    __magic_name__ = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        __magic_name__ , __magic_name__ = map(int, input().split())
    __magic_name__ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    __magic_name__ = burst_time
    __magic_name__ = no_of_processes
    __magic_name__ = waiting_time
    __magic_name__ = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    __magic_name__ = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )
    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
| 152 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
# Verbose root logging so test failures show the full run_eval output.
logging.basicConfig(level=logging.DEBUG)
__magic_name__ = logging.getLogger()  # root logger; a stdout handler is attached below
def _lowerCAmelCase ( A__: Path , A__: list ):
'''simple docstring'''
UpperCAmelCase = '''\n'''.join(A__ )
Path(A__ ).open('''w''' ).writelines(A__ )
# NOTE(review): obfuscation gave all three tiny-model checkpoints the SAME
# name, so each assignment clobbers the previous one (originally ``T5_TINY``,
# ``BART_TINY`` and ``MBART_TINY``, which the test class below still reads) —
# TODO restore distinct names.
__magic_name__ = "patrickvonplaten/t5-tiny-random"
__magic_name__ = "sshleifer/bart-tiny-random"
__magic_name__ = "sshleifer/tiny-mbart"
# Mirror log output to stdout so CaptureStdout can inspect it, and silence
# noisy download tracebacks.
__magic_name__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class lowercase ( A__ ):
    """End-to-end tests for ``run_eval.py`` / ``run_eval_search.py`` on tiny
    seq2seq checkpoints.

    NOTE(review): obfuscation damage — the base class ``A__`` is undefined
    (presumably ``TestCasePlus``), all four methods share the name
    ``snake_case_`` (later defs shadow earlier ones, and ``self.run_eval_tester``
    no longer exists), and every ``UpperCAmelCase = ...`` assignment discards
    its value so names like ``input_file_name``, ``score_path`` and ``testargs``
    are undefined. TODO restore the real bindings; the structure below still
    mirrors the original test module.
    """
    '''simple docstring'''
    def snake_case_ ( self , _snake_case ) -> int:
        """Run ``run_eval.py`` on a one-line source file and check the output
        and score files are produced."""
        UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
        UpperCAmelCase = input_file_name.parent / '''utest_output.txt'''
        assert not output_file_name.exists()
        UpperCAmelCase = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.''']
        _dump_articles(_snake_case , _snake_case )
        UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' )
        # T5 tiny is a translation model; the others summarize.
        UpperCAmelCase = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
        UpperCAmelCase = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()
        with patch.object(_snake_case , '''argv''' , _snake_case ):
            run_generate()
        assert Path(_snake_case ).exists()
        # os.remove(Path(output_file_name))
    def snake_case_ ( self ) -> Dict:
        """Smoke-test the eval helper on a single model."""
        self.run_eval_tester(_snake_case )
    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def snake_case_ ( self , _snake_case ) -> Any:
        """Parameterized eval over the tiny BART/MBART checkpoints."""
        self.run_eval_tester(_snake_case )
    @parameterized.expand([T5_TINY, MBART_TINY] )
    @slow
    def snake_case_ ( self , _snake_case ) -> Optional[int]:
        """Run ``run_eval_search.py`` with a small hyper-parameter grid and check
        the reported table mentions the expected columns/metrics."""
        UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
        UpperCAmelCase = input_file_name.parent / '''utest_output.txt'''
        assert not output_file_name.exists()
        # Parallel EN/DE sentences: sources and references for translation.
        UpperCAmelCase = {
            '''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''],
            '''de''': [
                '''Maschinelles Lernen ist großartig, oder?''',
                '''Ich esse gerne Bananen''',
                '''Morgen ist wieder ein toller Tag!''',
            ],
        }
        UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() )
        UpperCAmelCase = str(tmp_dir / '''scores.json''' )
        UpperCAmelCase = str(tmp_dir / '''val.target''' )
        _dump_articles(_snake_case , text['''en'''] )
        _dump_articles(_snake_case , text['''de'''] )
        UpperCAmelCase = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
        UpperCAmelCase = f"""
            run_eval_search.py
            {model}
            {str(_snake_case )}
            {str(_snake_case )}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] )
        with patch.object(_snake_case , '''argv''' , _snake_case ):
            with CaptureStdout() as cs:
                run_search()
        UpperCAmelCase = [''' num_beams | length_penalty''', model, '''Best score args''']
        UpperCAmelCase = ['''Info''']
        if "translation" in task:
            expected_strings.append('''bleu''' )
        else:
            expected_strings.extend(_snake_case )
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(_snake_case ).exists()
        os.remove(Path(_snake_case ) )
| 152 | 1 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): all four module-level constants were obfuscated to the SAME
# name, so each assignment clobbers the previous one (originally ``logger``,
# ``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP`` and
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``, which the tokenizer class below
# still references) — TODO restore distinct names.
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {"""vocab_file""": """vocab.txt"""}
SCREAMING_SNAKE_CASE : Optional[Any] = {
    """vocab_file""": {
        """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
    },
}
SCREAMING_SNAKE_CASE : List[Any] = {
    """openbmb/cpm-ant-10b""": 1024,
}
def lowercase ( _snake_case : List[str] ) ->Optional[Any]:
"""simple docstring"""
__snake_case : int = collections.OrderedDict()
with open(_snake_case , '''r''' , encoding='''utf-8''' ) as reader:
__snake_case : Optional[int] = reader.readlines()
for index, token in enumerate(_snake_case ):
__snake_case : Optional[Any] = token.rstrip('''\n''' )
__snake_case : int = index
return vocab
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_="<unk>" , a_=2_00 ):
'''simple docstring'''
__snake_case : Any = vocab
__snake_case : str = unk_token
__snake_case : Tuple = max_input_chars_per_word
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : str = list(a_ )
if len(a_ ) > self.max_input_chars_per_word:
return [self.unk_token]
__snake_case : List[Any] = 0
__snake_case : Optional[Any] = []
while start < len(a_ ):
__snake_case : List[str] = len(a_ )
__snake_case : List[Any] = None
while start < end:
__snake_case : int = ''''''.join(chars[start:end] )
if substr in self.vocab:
__snake_case : Dict = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(a_ )
__snake_case : List[str] = end
return sub_tokens
class _UpperCAmelCase ( __snake_case ):
    """CPM-Ant tokenizer (jieba word segmentation + greedy wordpiece).

    NOTE(review): obfuscation damage — the base class ``__snake_case`` is
    undefined (presumably ``PreTrainedTokenizer``), ``__init__`` and several
    methods repeat the parameter name ``a_`` (a SyntaxError), class-level
    constants reference names clobbered above, and several
    ``__snake_case : ... = ...`` assignments discard values the following
    lines read. The structure mirrors the original tokenizer; TODO restore
    the real bindings.
    """
    '''simple docstring'''
    lowerCamelCase__ =VOCAB_FILES_NAMES
    lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase__ =['input_ids', 'attention_mask']
    lowerCamelCase__ =False
    def __init__(self , a_ , a_="<d>" , a_="</d>" , a_="<s>" , a_="</s>" , a_="<pad>" , a_="<unk>" , a_="</n>" , a_="</_>" , a_="left" , **a_ , ):
        """Load the vocab, lift the space/line tokens out of the encoder, and
        build the inner wordpiece tokenizer."""
        '''simple docstring'''
        requires_backends(self , ['''jieba'''] )
        super().__init__(
            bod_token=a_ , eod_token=a_ , bos_token=a_ , eos_token=a_ , pad_token=a_ , unk_token=a_ , line_token=a_ , space_token=a_ , padding_side=a_ , **a_ , )
        __snake_case : Union[str, Any] = bod_token
        __snake_case : List[Any] = eod_token
        __snake_case : List[Any] = load_vocab(a_ )
        # Space and newline are handled specially, not as ordinary vocab entries.
        __snake_case : Dict = self.encoder[space_token]
        __snake_case : Any = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        __snake_case : List[str] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda a_ : x[1] ) )
        __snake_case : str = {v: k for k, v in self.encoder.items()}
        __snake_case : List[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
    @property
    def SCREAMING_SNAKE_CASE (self ):
        # id of the beginning-of-document token
        '''simple docstring'''
        return self.encoder[self.bod_token]
    @property
    def SCREAMING_SNAKE_CASE (self ):
        # id of the end-of-document token
        '''simple docstring'''
        return self.encoder[self.eod_token]
    @property
    def SCREAMING_SNAKE_CASE (self ):
        # id of the newline token
        '''simple docstring'''
        return self.encoder["\n"]
    @property
    def SCREAMING_SNAKE_CASE (self ):
        # vocabulary size
        '''simple docstring'''
        return len(self.encoder )
    def SCREAMING_SNAKE_CASE (self ):
        # full vocab including added tokens
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def SCREAMING_SNAKE_CASE (self , a_ ):
        """Segment with jieba, then wordpiece-split each segment."""
        '''simple docstring'''
        __snake_case : Any = []
        for x in jieba.cut(a_ , cut_all=a_ ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(a_ ) )
        return output_tokens
    def SCREAMING_SNAKE_CASE (self , a_ , **a_ ):
        """Drop negative ids and special tokens before decoding."""
        '''simple docstring'''
        __snake_case : Union[str, Any] = [i for i in token_ids if i >= 0]
        __snake_case : Tuple = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(a_ , **a_ )
    def SCREAMING_SNAKE_CASE (self , a_ ):
        # membership test against the encoder
        '''simple docstring'''
        return token in self.encoder
    def SCREAMING_SNAKE_CASE (self , a_ ):
        # join sub-tokens back into a string (no spaces in CPM-Ant)
        '''simple docstring'''
        return "".join(a_ )
    def SCREAMING_SNAKE_CASE (self , a_ ):
        # token -> id, falling back to the unk id
        '''simple docstring'''
        return self.encoder.get(a_ , self.encoder.get(self.unk_token ) )
    def SCREAMING_SNAKE_CASE (self , a_ ):
        # id -> token, falling back to the unk token
        '''simple docstring'''
        return self.decoder.get(a_ , self.unk_token )
    def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
        """Write the vocabulary (with space/newline restored) to disk, warning
        if indices are not consecutive."""
        '''simple docstring'''
        if os.path.isdir(a_ ):
            __snake_case : Optional[Any] = os.path.join(
                a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        else:
            __snake_case : Optional[Any] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        __snake_case : List[str] = 0
        if " " in self.encoder:
            __snake_case : Optional[Any] = self.encoder[''' ''']
            del self.encoder[" "]
        if "\n" in self.encoder:
            __snake_case : int = self.encoder['''\n''']
            del self.encoder["\n"]
        __snake_case : Dict = collections.OrderedDict(sorted(self.encoder.items() , key=lambda a_ : x[1] ) )
        with open(a_ , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''' )
                    __snake_case : Union[str, Any] = token_index
                writer.write(token + '''\n''' )
                index += 1
        return (vocab_file,)
    def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
        """Prepend BOS to each sequence (and concatenate for pairs)."""
        '''simple docstring'''
        if token_ids_a is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
    def SCREAMING_SNAKE_CASE (self , a_ , a_ = None , a_ = False ):
        """Mark special-token positions with 1, regular tokens with 0."""
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
        if token_ids_a is not None:
            return [1] + ([0] * len(a_ )) + [1] + ([0] * len(a_ ))
        return [1] + ([0] * len(a_ ))
| 102 | import argparse
import json
from tqdm import tqdm
def lowerCamelCase ( ):
    """Parse raw DPR training data into an evaluation-set file (questions) and
    a gold-data file (tab-separated positive-context titles).

    Bug fix: the original used the obfuscated name ``SCREAMING_SNAKE_CASE`` as
    argparse ``type=``, as the ``json.load`` source and as the ``tqdm``
    iterable, and discarded every assignment — all now bound properly.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--src_path''' , type=str , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
    parser.add_argument(
        '''--evaluation_set''' , type=str , help='''where to store parsed evaluation_set file''' , )
    parser.add_argument(
        '''--gold_data_path''' , type=str , help='''where to store parsed gold_data_path file''' , )
    args = parser.parse_args()
    with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
        args.gold_data_path , '''w''' ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record['''question''']
            contexts = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
            # One question per line; the matching gold line holds its positive titles.
            eval_file.write(question + '''\n''' )
            gold_file.write('''\t'''.join(contexts ) + '''\n''' )
if __name__ == "__main__":
    # Bug fix: ``main`` was never defined; the entry point above is bound to
    # the (obfuscated) name ``lowerCamelCase``.
    lowerCamelCase()
| 43 | 0 |
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase ( a_ ) -> list[int]:
    """Return the prime factorization of ``a_`` as an ascending list
    (with multiplicity); 1 and 0 yield an empty list.

    Bug fix: the original read/updated the undefined name ``n`` and appended
    the original argument ``a_`` instead of the found factor.
    """
    i = 2
    factors = []
    n = a_
    # Trial-divide up to sqrt(n); each successful division records one factor.
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    # Whatever remains above 1 is itself prime.
    if n > 1:
        factors.append(n )
    return factors
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 164 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase ) -> Optional[Any]:
A_ : Any = data
A_ : Node | None = None
class _lowerCAmelCase :
    """Circular singly linked list with head/tail pointers and 0-based
    positional insert/delete.

    NOTE(review): obfuscation damage — this class shadows the Node class
    above (both are named ``_lowerCAmelCase``, so ``Node`` in ``insert_nth``
    is undefined), most methods share the name ``UpperCAmelCase_`` (later defs
    shadow earlier ones), and every ``A_ : ... = ...`` assignment discards its
    value instead of binding ``self.head``/``self.tail``/``node``/``temp`` etc.
    The statements mirror the original list implementation; TODO restore the
    real bindings.
    """
    """simple docstring"""
    def __init__( self ) -> List[str]:
        # head and tail of the empty list
        A_ : Tuple = None
        A_ : str = None
    def __iter__( self ) -> Iterator[Any]:
        # Walk from head until the cycle wraps back around.
        A_ : Dict = self.head
        while self.head:
            yield node.data
            A_ : Optional[Any] = node.next
            if node == self.head:
                break
    def __len__( self ) -> int:
        return sum(1 for _ in self )
    def __repr__( self ) -> str:
        return "->".join(str(_lowerCamelCase ) for item in iter(self ) )
    def UpperCAmelCase_ ( self , _lowerCamelCase ) -> None:
        # append at the tail
        self.insert_nth(len(self ) , _lowerCamelCase )
    def UpperCAmelCase_ ( self , _lowerCamelCase ) -> None:
        # prepend at the head
        self.insert_nth(0 , _lowerCamelCase )
    def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> None:
        # positional insert; keeps the tail's next pointer cycling to the head
        if index < 0 or index > len(self ):
            raise IndexError("""list index out of range.""" )
        A_ : Optional[int] = Node(_lowerCamelCase )
        if self.head is None:
            A_ : str = new_node  # first node points itself
            A_ : Union[str, Any] = new_node
        elif index == 0:  # insert at head
            A_ : List[Any] = self.head
            A_ : List[Any] = new_node
        else:
            A_ : List[str] = self.head
            for _ in range(index - 1 ):
                A_ : Optional[int] = temp.next
            A_ : Tuple = temp.next
            A_ : str = new_node
            if index == len(self ) - 1:  # insert at tail
                A_ : Optional[int] = new_node
    def UpperCAmelCase_ ( self ) -> List[Any]:
        # pop from the head
        return self.delete_nth(0 )
    def UpperCAmelCase_ ( self ) -> Any:
        # pop from the tail
        return self.delete_nth(len(self ) - 1 )
    def UpperCAmelCase_ ( self , _lowerCamelCase = 0 ) -> Any:
        # positional delete; returns the removed node's payload
        if not 0 <= index < len(self ):
            raise IndexError("""list index out of range.""" )
        A_ : int = self.head
        if self.head == self.tail:  # just one node
            A_ : int = None
        elif index == 0:  # delete head node
            A_ : Union[str, Any] = self.tail.next.next
            A_ : Tuple = self.head.next
        else:
            A_ : Optional[int] = self.head
            for _ in range(index - 1 ):
                A_ : Tuple = temp.next
            A_ : Any = temp.next
            A_ : Tuple = temp.next.next
            if index == len(self ) - 1:  # delete at tail
                A_ : List[str] = temp
        return delete_node.data
    def UpperCAmelCase_ ( self ) -> bool:
        return len(self ) == 0
def UpperCAmelCase() -> None:
    """Smoke-test every CircularLinkedList operation, including error paths."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    # Deleting from an empty list must raise IndexError, never succeed.
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True

    # Build 1..5 via positional insert, checking length as we go.
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 164 | 1 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime (trial division over 6k±1 candidates)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def _snake_case(ratio: float = 0.1) -> int:
    """Project Euler 58: smallest square-spiral side length whose diagonal
    prime ratio first drops below ``ratio``.

    ``primes`` starts at 3 (3, 5, 7 on the side-3 ring); each iteration adds
    the three non-square corners of the next ring (side ``j + 2``) and the
    diagonal count is ``2 * j - 1``.
    """
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)  # bool adds as 0/1
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 335 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE_ : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
SCREAMING_SNAKE_CASE_ : str = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
SCREAMING_SNAKE_CASE_ : List[str] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... 
lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
    """ChrF(++) metric backed by sacrebleu's ``CHRF`` implementation.

    Note: references must be transposed relative to sacrebleu's native input
    (one sub-list of references per prediction), and every prediction must
    have the same number of references.
    """

    def _info(self):
        """Declare metric metadata and the expected input features."""
        if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
            raise ImportWarning(
                """To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
                """You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
                } ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
                """https://github.com/m-popovic/chrF""",
            ] , )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        """Compute the corpus-level chrF(++) score.

        Returns a dict with the score and the chrF configuration used.
        """
        references_per_prediction = len(references[0] )
        if any(len(refs) != references_per_prediction for refs in references ):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
        # Transpose: sacrebleu wants one list per reference position.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing )
        output = sb_chrf.corpus_score(predictions, transformed_references )
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 335 | 1 |
"""simple docstring"""
from __future__ import annotations
_A = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic):
    """A*-style grid search from ``init`` to ``goal``.

    ``grid`` holds 0 for free cells and 1 for obstacles; ``cost`` is the
    uniform step cost; ``heuristic`` is a per-cell estimate added to ``g``.
    Returns ``(path, action)`` where ``path`` is the list of [x, y] cells
    from init to goal and ``action`` records the direction index used to
    enter each cell. Raises ValueError when no path exists.
    """
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid: 1 marks already-expanded cells
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError('Algorithm is unable to find solution' )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    # Walk backwards from the goal using the recorded actions.
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )

    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 9_9

    path, action = search(grid, init, goal, cost, heuristic)

    print('ACTION MAP')
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 205 |
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_A = logging.getLogger(__name__)
class RayRetriever:
    """Ray actor wrapper that lazily builds a ``RagRetriever`` on a worker."""

    def __init__(self) -> None:
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        """Build the worker-local retriever once; index loading is deferred."""
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        """Load the retrieval index into this worker process."""
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Run retrieval on this worker and return (doc_ids, doc_embeds)."""
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class _lowercase(RagRetriever):
    """RAG retriever that distributes index lookups across Ray worker actors.

    With zero workers it degrades to in-process retrieval.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                'you\'ll need to provide the paths instead, '
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers ) > 0:
            # Eagerly create a retriever on every worker actor.
            ray.get(
                [
                    worker.create_rag_retriever.remote(
                        config, question_encoder_tokenizer, generator_tokenizer, index
                    )
                    for worker in self.retrieval_workers
                ] )

    def init_retrieval(self):
        """Initialize the index on all workers, or locally when none exist."""
        logger.info('initializing retrieval' )
        if len(self.retrieval_workers ) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Retrieve via a randomly chosen worker (or locally) and return
        (doc_embeds, doc_ids, doc_dicts)."""
        if len(self.retrieval_workers ) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers ) - 1 )]
            doc_ids, retrieved_doc_embeds = ray.get(
                random_worker.retrieve.remote(question_hidden_states, n_docs )
            )
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs )
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(_lowercase, cls ).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs )

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        """Alternate constructor: load config/tokenizers and wire up workers."""
        config = kwargs.pop('config', None ) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs )
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config )
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset )
        else:
            index = cls._build_index(config )
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
| 205 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.