code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase : Tuple = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_lowerCAmelCase : Optional[Any] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_lowerCAmelCase : int = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_lowerCAmelCase : List[Any] = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(snake_case_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(snake_case_ ) )
_lowerCAmelCase : Optional[int] = {
"""do_resize""": True,
"""size""": 2_0,
"""do_center_crop""": True,
"""crop_size""": 1_8,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
_lowerCAmelCase : Dict = os.path.join(self.tmpdirname , snake_case_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(snake_case_ , snake_case_ )
def __UpperCamelCase ( self , **snake_case_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __UpperCamelCase ( self , **snake_case_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **snake_case_ )
def __UpperCamelCase ( self , **snake_case_ ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case_ )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_lowerCAmelCase : Tuple = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_image_processor()
_lowerCAmelCase : List[str] = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor_slow.save_pretrained(self.tmpdirname )
_lowerCAmelCase : int = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case_ )
_lowerCAmelCase : Tuple = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor_fast.save_pretrained(self.tmpdirname )
_lowerCAmelCase : str = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case_ )
self.assertIsInstance(processor_fast.tokenizer , snake_case_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case_ )
self.assertIsInstance(processor_fast.image_processor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : List[str] = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
_lowerCAmelCase : Tuple = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.get_image_processor()
_lowerCAmelCase : str = self.get_tokenizer()
_lowerCAmelCase : List[Any] = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_lowerCAmelCase : Dict = self.prepare_image_inputs()
_lowerCAmelCase : List[str] = image_processor(snake_case_ , return_tensors="""np""" )
_lowerCAmelCase : Any = processor(images=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : str = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Optional[int] = processor(text=snake_case_ )
_lowerCAmelCase : Dict = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : str = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_lowerCAmelCase : Optional[int] = """lower newer"""
_lowerCAmelCase : List[Any] = self.prepare_image_inputs()
_lowerCAmelCase : int = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
_lowerCAmelCase : str = self.prepare_image_inputs()
_lowerCAmelCase : List[str] = processor(images=snake_case_ , visual_prompt=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : int = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_lowerCAmelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : Optional[Any] = processor.batch_decode(snake_case_ )
_lowerCAmelCase : str = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
| 309
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """vocab.txt"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class a_ (_a ):
__lowerCAmelCase : Any = VOCAB_FILES_NAMES
__lowerCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[int] = ConvBertTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ):
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
_lowerCAmelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
_lowerCAmelCase : List[str] = do_lower_case
_lowerCAmelCase : str = strip_accents
_lowerCAmelCase : List[Any] = tokenize_chinese_chars
_lowerCAmelCase : List[Any] = normalizer_class(**snake_case_ )
_lowerCAmelCase : str = do_lower_case
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Optional[Any] = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Any = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
| 309
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
def _UpperCAmelCase ( _lowerCamelCase : float , _lowerCamelCase : int ) -> float:
_lowerCAmelCase : Optional[Any] = u
for i in range(1 , _lowerCamelCase ):
_lowerCAmelCase : Tuple = temp * (u - i)
return temp
def _UpperCAmelCase ( ) -> None:
_lowerCAmelCase : Tuple = int(input("""enter the numbers of values: """ ) )
_lowerCAmelCase : list[list[float]] = []
for _ in range(_lowerCamelCase ):
y.append([] )
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
y[i].append(_lowerCamelCase )
_lowerCAmelCase : Any = 0
print("""enter the values of parameters in a list: """ )
_lowerCAmelCase : List[str] = list(map(_lowerCamelCase , input().split() ) )
print("""enter the values of corresponding parameters: """ )
for i in range(_lowerCamelCase ):
_lowerCAmelCase : int = float(input() )
_lowerCAmelCase : Optional[Any] = int(input("""enter the value to interpolate: """ ) )
_lowerCAmelCase : Optional[int] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , _lowerCamelCase ):
for j in range(n - i ):
_lowerCAmelCase : Tuple = y[j + 1][i - 1] - y[j][i - 1]
_lowerCAmelCase : Tuple = y[0][0]
for i in range(1 , _lowerCamelCase ):
summ += (ucal(_lowerCamelCase , _lowerCamelCase ) * y[0][i]) / math.factorial(_lowerCamelCase )
print(f'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
| 309
|
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a_ :
def __init__( self ):
_lowerCAmelCase : Any = """"""
_lowerCAmelCase : List[Any] = """"""
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : int = 0
_lowerCAmelCase : str = 2_5_6
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Dict = 0
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : str = cva.imread(snake_case_ , 0 )
_lowerCAmelCase : List[str] = copy.deepcopy(self.img )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="""x""" )
_lowerCAmelCase : List[Any] = np.sum(snake_case_ )
for i in range(len(snake_case_ ) ):
_lowerCAmelCase : Optional[int] = x[i] / self.k
self.sk += prk
_lowerCAmelCase : Any = (self.L - 1) * self.sk
if self.rem != 0:
_lowerCAmelCase : Dict = int(last % last )
_lowerCAmelCase : str = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(snake_case_ )
_lowerCAmelCase : str = int(np.ma.count(self.img ) / self.img[1].size )
_lowerCAmelCase : Union[str, Any] = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
_lowerCAmelCase : Union[str, Any] = self.img[j][i]
if num != self.last_list[num]:
_lowerCAmelCase : List[str] = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def __UpperCamelCase ( self ):
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def __UpperCamelCase ( self ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
UpperCamelCase_ = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
UpperCamelCase_ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309
| 1
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class a_ (_a ):
__lowerCAmelCase : int = """bridgetower_vision_model"""
def __init__( self , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=3 , snake_case_=1_6 , snake_case_=2_8_8 , snake_case_=1 , snake_case_=1E-05 , snake_case_=False , snake_case_=True , snake_case_=False , **snake_case_ , ):
super().__init__(**snake_case_ )
_lowerCAmelCase : Optional[int] = hidden_size
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : Dict = num_channels
_lowerCAmelCase : Union[str, Any] = patch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : Optional[Any] = initializer_factor
_lowerCAmelCase : str = layer_norm_eps
_lowerCAmelCase : List[str] = stop_gradient
_lowerCAmelCase : Dict = share_layernorm
_lowerCAmelCase : str = remove_last_layer
@classmethod
def __UpperCamelCase ( cls , snake_case_ , **snake_case_ ):
_lowerCAmelCase , _lowerCAmelCase : List[str] = cls.get_config_dict(snake_case_ , **snake_case_ )
if config_dict.get("""model_type""" ) == "bridgetower":
_lowerCAmelCase : Tuple = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case_ , **snake_case_ )
class a_ (_a ):
__lowerCAmelCase : List[str] = """bridgetower_text_model"""
def __init__( self , snake_case_=5_0_2_6_5 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=1 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_4 , snake_case_=1 , snake_case_=1E-05 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_="absolute" , snake_case_=True , **snake_case_ , ):
super().__init__(**snake_case_ )
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = initializer_factor
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : List[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Dict = max_position_embeddings
_lowerCAmelCase : Tuple = type_vocab_size
_lowerCAmelCase : Optional[Any] = layer_norm_eps
_lowerCAmelCase : Any = position_embedding_type
_lowerCAmelCase : Any = use_cache
_lowerCAmelCase : int = pad_token_id
_lowerCAmelCase : str = bos_token_id
_lowerCAmelCase : Union[str, Any] = eos_token_id
@classmethod
def __UpperCamelCase ( cls , snake_case_ , **snake_case_ ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = cls.get_config_dict(snake_case_ , **snake_case_ )
if config_dict.get("""model_type""" ) == "bridgetower":
_lowerCAmelCase : Dict = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case_ , **snake_case_ )
class a_ (_a ):
__lowerCAmelCase : List[Any] = """bridgetower"""
def __init__( self , snake_case_=True , snake_case_="gelu" , snake_case_=7_6_8 , snake_case_=1 , snake_case_=1E-05 , snake_case_=False , snake_case_="add" , snake_case_=1_2 , snake_case_=6 , snake_case_=False , snake_case_=False , snake_case_=None , snake_case_=None , **snake_case_ , ):
# TODO: remove this once the Hub files are updated.
_lowerCAmelCase : List[str] = kwargs.pop("""text_config_dict""" , snake_case_ )
_lowerCAmelCase : Any = kwargs.pop("""vision_config_dict""" , snake_case_ )
super().__init__(**snake_case_ )
_lowerCAmelCase : str = share_cross_modal_transformer_layers
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : Dict = initializer_factor
_lowerCAmelCase : Optional[Any] = layer_norm_eps
_lowerCAmelCase : Any = share_link_tower_layers
_lowerCAmelCase : List[str] = link_tower_type
_lowerCAmelCase : int = num_attention_heads
_lowerCAmelCase : Any = num_hidden_layers
_lowerCAmelCase : List[str] = tie_word_embeddings
_lowerCAmelCase : List[Any] = init_layernorm_from_vision_encoder
if text_config is None:
_lowerCAmelCase : Any = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_lowerCAmelCase : Optional[int] = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_lowerCAmelCase : str = BridgeTowerTextConfig(**snake_case_ )
_lowerCAmelCase : List[str] = BridgeTowerVisionConfig(**snake_case_ )
@classmethod
def __UpperCamelCase ( cls , snake_case_ , snake_case_ , **snake_case_ ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = copy.deepcopy(self.__dict__ )
_lowerCAmelCase : Union[str, Any] = self.text_config.to_dict()
_lowerCAmelCase : int = self.vision_config.to_dict()
_lowerCAmelCase : Union[str, Any] = self.__class__.model_type
return output
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Any ) -> Optional[int]:
_lowerCAmelCase : List[Any] = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"""
_lowerCAmelCase : int = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("""RGB""" )
_lowerCAmelCase : Optional[int] = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
_lowerCAmelCase : Optional[Any] = transform(_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
return image
def _UpperCAmelCase ( _lowerCamelCase : Any ) -> Any:
if "visual_encoder" in key:
_lowerCAmelCase : Dict = re.sub("""visual_encoder*""" , """vision_model.encoder""" , _lowerCamelCase )
if "blocks" in key:
_lowerCAmelCase : int = re.sub(R"""blocks""" , """layers""" , _lowerCamelCase )
if "attn" in key:
_lowerCAmelCase : List[Any] = re.sub(R"""attn""" , """self_attn""" , _lowerCamelCase )
if "norm1" in key:
_lowerCAmelCase : Optional[int] = re.sub(R"""norm1""" , """layer_norm1""" , _lowerCamelCase )
if "norm2" in key:
_lowerCAmelCase : Union[str, Any] = re.sub(R"""norm2""" , """layer_norm2""" , _lowerCamelCase )
if "encoder.norm" in key:
_lowerCAmelCase : List[str] = re.sub(R"""encoder.norm""" , """post_layernorm""" , _lowerCamelCase )
if "encoder.patch_embed.proj" in key:
_lowerCAmelCase : List[str] = re.sub(R"""encoder.patch_embed.proj""" , """embeddings.patch_embedding""" , _lowerCamelCase )
if "encoder.pos_embed" in key:
_lowerCAmelCase : Union[str, Any] = re.sub(R"""encoder.pos_embed""" , """embeddings.position_embedding""" , _lowerCamelCase )
if "encoder.cls_token" in key:
_lowerCAmelCase : str = re.sub(R"""encoder.cls_token""" , """embeddings.class_embedding""" , _lowerCamelCase )
if "self_attn" in key:
_lowerCAmelCase : str = re.sub(R"""self_attn.proj""" , """self_attn.projection""" , _lowerCamelCase )
return key
@torch.no_grad()
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any]=None ) -> Any:
if config_path is not None:
_lowerCAmelCase : Optional[Any] = BlipConfig.from_pretrained(_lowerCamelCase )
else:
_lowerCAmelCase : Optional[Any] = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
_lowerCAmelCase : Dict = BlipForConditionalGeneration(_lowerCamelCase ).eval()
_lowerCAmelCase : List[str] = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"""
_lowerCAmelCase : Optional[int] = blip_decoder(pretrained=_lowerCamelCase , image_size=3_84 , vit="""base""" )
_lowerCAmelCase : List[Any] = pt_model.eval()
_lowerCAmelCase : Any = pt_model.state_dict()
for key in modified_state_dict.copy():
_lowerCAmelCase : List[str] = modified_state_dict.pop(_lowerCamelCase )
_lowerCAmelCase : Any = rename_key(_lowerCamelCase )
_lowerCAmelCase : List[str] = value
hf_model.load_state_dict(_lowerCamelCase )
_lowerCAmelCase : Any = 3_84
_lowerCAmelCase : int = load_demo_image(image_size=_lowerCamelCase , device="""cpu""" )
_lowerCAmelCase : Union[str, Any] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_lowerCAmelCase : Tuple = tokenizer(["""a picture of"""] ).input_ids
_lowerCAmelCase : Any = hf_model.generate(_lowerCamelCase , _lowerCamelCase )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
_lowerCAmelCase : List[str] = hf_model.generate(_lowerCamelCase )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(_lowerCamelCase )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
_lowerCAmelCase : List[Any] = (
"""https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"""
)
_lowerCAmelCase : Union[str, Any] = blip_vqa(pretrained=_lowerCamelCase , image_size=_lowerCamelCase , vit="""base""" )
vqa_model.eval()
_lowerCAmelCase : List[Any] = vqa_model.state_dict()
for key in modified_state_dict.copy():
_lowerCAmelCase : Dict = modified_state_dict.pop(_lowerCamelCase )
_lowerCAmelCase : Any = rename_key(_lowerCamelCase )
_lowerCAmelCase : Any = value
_lowerCAmelCase : Optional[Any] = BlipForQuestionAnswering(_lowerCamelCase )
hf_vqa_model.load_state_dict(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = ["""How many dogs are in this image?"""]
_lowerCAmelCase : str = tokenizer(_lowerCamelCase , return_tensors="""pt""" ).input_ids
_lowerCAmelCase : Dict = hf_vqa_model.generate(_lowerCamelCase , _lowerCamelCase )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + """_vqa""" )
_lowerCAmelCase : Union[str, Any] = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"""
_lowerCAmelCase : str = blip_itm(pretrained=_lowerCamelCase , image_size=_lowerCamelCase , vit="""base""" )
itm_model.eval()
_lowerCAmelCase : str = itm_model.state_dict()
for key in modified_state_dict.copy():
_lowerCAmelCase : Optional[int] = modified_state_dict.pop(_lowerCamelCase )
_lowerCAmelCase : List[Any] = rename_key(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = value
_lowerCAmelCase : Union[str, Any] = BlipForImageTextRetrieval(_lowerCamelCase )
_lowerCAmelCase : Tuple = ["""A picture of a woman with a dog sitting in a beach"""]
_lowerCAmelCase : int = tokenizer(
_lowerCamelCase , return_tensors="""pt""" , padding="""max_length""" , truncation=_lowerCamelCase , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(_lowerCamelCase )
hf_itm_model.eval()
_lowerCAmelCase : List[str] = hf_itm_model(_lowerCamelCase , _lowerCamelCase , use_itm_head=_lowerCamelCase )
_lowerCAmelCase : Any = hf_itm_model(_lowerCamelCase , _lowerCamelCase , use_itm_head=_lowerCamelCase )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + """_itm""" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
UpperCamelCase_ = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 309
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
UpperCamelCase_ = 0
UpperCamelCase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCamelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
UpperCamelCase_ = tuple[int, int]
class a_ :
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_lowerCAmelCase : Optional[int] = pos_x
_lowerCAmelCase : List[str] = pos_y
_lowerCAmelCase : Tuple = (pos_y, pos_x)
_lowerCAmelCase : List[Any] = goal_x
_lowerCAmelCase : int = goal_y
_lowerCAmelCase : Union[str, Any] = g_cost
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : List[Any] = self.calculate_heuristic()
_lowerCAmelCase : Optional[int] = self.g_cost + self.h_cost
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.pos_x - self.goal_x
_lowerCAmelCase : Optional[int] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(snake_case_ ) + abs(snake_case_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , snake_case_ ):
return self.f_cost < other.f_cost
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , snake_case_ )
_lowerCAmelCase : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , snake_case_ )
_lowerCAmelCase : List[str] = [self.start]
_lowerCAmelCase : list[Node] = []
_lowerCAmelCase : List[str] = False
def __UpperCamelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(snake_case_ )
self.closed_nodes.append(snake_case_ )
_lowerCAmelCase : Optional[int] = self.get_successors(snake_case_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(snake_case_ )
else:
# retrieve the best current path
_lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(snake_case_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(snake_case_ )
else:
self.open_nodes.append(snake_case_ )
return [self.start.pos]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Union[str, Any] = []
for action in delta:
_lowerCAmelCase : Union[str, Any] = parent.pos_x + action[1]
_lowerCAmelCase : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(snake_case_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
snake_case_ , snake_case_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , snake_case_ , ) )
return successors
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[Any] = node
_lowerCAmelCase : Optional[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCAmelCase : Optional[int] = current_node.parent
path.reverse()
return path
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = AStar(snake_case_ , snake_case_ )
_lowerCAmelCase : int = AStar(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = False
def __UpperCamelCase ( self ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
_lowerCAmelCase : Tuple = self.fwd_astar.open_nodes.pop(0 )
_lowerCAmelCase : Optional[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
snake_case_ , snake_case_ )
self.fwd_astar.closed_nodes.append(snake_case_ )
self.bwd_astar.closed_nodes.append(snake_case_ )
_lowerCAmelCase : List[str] = current_bwd_node
_lowerCAmelCase : Dict = current_fwd_node
_lowerCAmelCase : Any = {
self.fwd_astar: self.fwd_astar.get_successors(snake_case_ ),
self.bwd_astar: self.bwd_astar.get_successors(snake_case_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(snake_case_ )
else:
# retrieve the best current path
_lowerCAmelCase : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(snake_case_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(snake_case_ )
else:
astar.open_nodes.append(snake_case_ )
return [self.fwd_astar.start.pos]
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : int = self.fwd_astar.retrace_path(snake_case_ )
_lowerCAmelCase : Optional[Any] = self.bwd_astar.retrace_path(snake_case_ )
bwd_path.pop()
bwd_path.reverse()
_lowerCAmelCase : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
UpperCamelCase_ = (0, 0)
UpperCamelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCamelCase_ = time.time()
UpperCamelCase_ = AStar(init, goal)
UpperCamelCase_ = a_star.search()
UpperCamelCase_ = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
UpperCamelCase_ = time.time()
UpperCamelCase_ = BidirectionalAStar(init, goal)
UpperCamelCase_ = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 309
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = """laion/clap-htsat-unfused"""
_lowerCAmelCase : int = tempfile.mkdtemp()
def __UpperCamelCase ( self , **snake_case_ ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self , **snake_case_ ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = self.get_feature_extractor()
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : int = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
_lowerCAmelCase : Dict = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = floats_list((3, 1_0_0_0) )
_lowerCAmelCase : List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
_lowerCAmelCase : Optional[Any] = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Tuple = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = """This is a test string"""
_lowerCAmelCase : Union[str, Any] = processor(text=snake_case_ )
_lowerCAmelCase : Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = self.get_feature_extractor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[Any] = processor.batch_decode(snake_case_ )
_lowerCAmelCase : Dict = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.get_feature_extractor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 309
|
'''simple docstring'''
def _UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : str ) -> list[int]:
_lowerCAmelCase : List[Any] = int(_lowerCamelCase )
# Initialize Result
_lowerCAmelCase : Any = []
# Traverse through all denomination
for denomination in reversed(_lowerCamelCase ):
# Find denominations
while int(_lowerCamelCase ) >= int(_lowerCamelCase ):
total_value -= int(_lowerCamelCase )
answer.append(_lowerCamelCase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
UpperCamelCase_ = []
UpperCamelCase_ = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
UpperCamelCase_ = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
UpperCamelCase_ = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCamelCase_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCamelCase_ = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'Following is minimal change for {value}: ')
UpperCamelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 309
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
UpperCamelCase_ = False
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self ):
return 1_2
@property
def __UpperCamelCase ( self ):
return 1_2
@property
def __UpperCamelCase ( self ):
return 3_2
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[str] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(snake_case_ )
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = 1_2
_lowerCAmelCase : Union[str, Any] = 1_2
_lowerCAmelCase : Optional[int] = {
"""attention_bias""": True,
"""cross_attention_dim""": 3_2,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 3_2,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
_lowerCAmelCase : str = TransformeraDModel(**snake_case_ )
return model
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = """cpu"""
_lowerCAmelCase : Optional[Any] = self.dummy_vqvae
_lowerCAmelCase : Tuple = self.dummy_text_encoder
_lowerCAmelCase : List[str] = self.dummy_tokenizer
_lowerCAmelCase : Optional[int] = self.dummy_transformer
_lowerCAmelCase : Optional[Any] = VQDiffusionScheduler(self.num_embed )
_lowerCAmelCase : int = LearnedClassifierFreeSamplingEmbeddings(learnable=snake_case_ )
_lowerCAmelCase : Union[str, Any] = VQDiffusionPipeline(
vqvae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , transformer=snake_case_ , scheduler=snake_case_ , learned_classifier_free_sampling_embeddings=snake_case_ , )
_lowerCAmelCase : Optional[int] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : Any = """teddy bear playing in the pool"""
_lowerCAmelCase : List[str] = torch.Generator(device=snake_case_ ).manual_seed(0 )
_lowerCAmelCase : str = pipe([prompt] , generator=snake_case_ , num_inference_steps=2 , output_type="""np""" )
_lowerCAmelCase : Optional[Any] = output.images
_lowerCAmelCase : List[Any] = torch.Generator(device=snake_case_ ).manual_seed(0 )
_lowerCAmelCase : Dict = pipe(
[prompt] , generator=snake_case_ , output_type="""np""" , return_dict=snake_case_ , num_inference_steps=2 )[0]
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
_lowerCAmelCase : Tuple = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = """cpu"""
_lowerCAmelCase : List[Any] = self.dummy_vqvae
_lowerCAmelCase : Optional[int] = self.dummy_text_encoder
_lowerCAmelCase : Union[str, Any] = self.dummy_tokenizer
_lowerCAmelCase : Optional[int] = self.dummy_transformer
_lowerCAmelCase : List[str] = VQDiffusionScheduler(self.num_embed )
_lowerCAmelCase : Optional[int] = LearnedClassifierFreeSamplingEmbeddings(
learnable=snake_case_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
_lowerCAmelCase : Any = VQDiffusionPipeline(
vqvae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , transformer=snake_case_ , scheduler=snake_case_ , learned_classifier_free_sampling_embeddings=snake_case_ , )
_lowerCAmelCase : Optional[int] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : Optional[int] = """teddy bear playing in the pool"""
_lowerCAmelCase : List[Any] = torch.Generator(device=snake_case_ ).manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe([prompt] , generator=snake_case_ , num_inference_steps=2 , output_type="""np""" )
_lowerCAmelCase : Any = output.images
_lowerCAmelCase : Optional[int] = torch.Generator(device=snake_case_ ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(
[prompt] , generator=snake_case_ , output_type="""np""" , return_dict=snake_case_ , num_inference_steps=2 )[0]
_lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
_lowerCAmelCase : Dict = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
_lowerCAmelCase : Union[str, Any] = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
_lowerCAmelCase : Tuple = pipeline.to(snake_case_ )
pipeline.set_progress_bar_config(disable=snake_case_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
_lowerCAmelCase : int = torch.Generator(device=snake_case_ ).manual_seed(0 )
_lowerCAmelCase : Any = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=snake_case_ , output_type="""np""" , )
_lowerCAmelCase : List[str] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
UpperCamelCase_ = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
UpperCamelCase_ = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
UpperCamelCase_ = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
UpperCamelCase_ = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
UpperCamelCase_ = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
UpperCamelCase_ = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
UpperCamelCase_ = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def _UpperCAmelCase ( ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase : List[Any] = randrange(len(_lowerCamelCase ) ), randrange(len(_lowerCamelCase ) )
_lowerCAmelCase : Optional[Any] = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def _UpperCAmelCase ( _lowerCamelCase : int = 1_00 ) -> Optional[Any]:
return (generate_random_hand() for _ in range(_lowerCamelCase ))
@pytest.mark.parametrize("""hand, expected""" , _lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : int ) -> Optional[Any]:
assert PokerHand(_lowerCamelCase )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , _lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : int ) -> int:
assert PokerHand(_lowerCamelCase )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , _lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int ) -> Union[str, Any]:
_lowerCAmelCase : int = PokerHand(_lowerCamelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , _lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : str ) -> Optional[int]:
assert PokerHand(_lowerCamelCase )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , _lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any ) -> str:
assert PokerHand(_lowerCamelCase )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , _lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Any , _lowerCamelCase : List[Any] ) -> Dict:
assert PokerHand(_lowerCamelCase ).compare_with(PokerHand(_lowerCamelCase ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] ) -> Any:
assert PokerHand(_lowerCamelCase ).compare_with(PokerHand(_lowerCamelCase ) ) == expected
def _UpperCAmelCase ( ) -> Dict:
_lowerCAmelCase : Optional[Any] = [PokerHand(_lowerCamelCase ) for hand in SORTED_HANDS]
_lowerCAmelCase : Any = poker_hands.copy()
shuffle(_lowerCamelCase )
_lowerCAmelCase : List[str] = chain(sorted(_lowerCamelCase ) )
for index, hand in enumerate(_lowerCamelCase ):
assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("""2C 4S AS 3D 5C""" )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , """poker_hands.txt""" )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 3_76
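# Quick illustrative check (an addition, not part of the original test file):
# compare_with returns the plain strings "Win"/"Loss"/"Tie", which is what the
# parametrized tests above assert against.
def _demo_compare():
    royal_flush = PokerHand("""JH AH TH KH QH""" )
    high_card = PokerHand("""2D 6D 9D TH 7D""" )
    assert royal_flush.compare_with(high_card ) == """Win"""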
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase ):
    def setUp( self ):
        self.checkpoint = """laion/clap-htsat-unfused"""
        self.tmpdirname = tempfile.mkdtemp()
    def get_tokenizer( self , **kwargs ):
        return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
def __UpperCamelCase ( self ):
        processor = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False , padding_value=1.0 )
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
def __UpperCamelCase ( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        raw_speech = floats_list((3, 1_0_0_0) )
        input_feat_extract = feature_extractor(raw_speech , return_tensors="""np""" )
        input_processor = processor(audios=raw_speech , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        input_str = """This is a test string"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
def __UpperCamelCase ( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
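# Hedged usage sketch (an addition, not part of the tests above): pairing text
# and raw audio through one processor, mirroring what the assertions exercise.
# Running it downloads the laion/clap-htsat-unfused checkpoint files.
def _demo_clap_processor():
    processor = ClapProcessor.from_pretrained("""laion/clap-htsat-unfused""" )
    raw_speech = floats_list((1, 1_0_0_0) )
    inputs = processor(text="""a dog barking""" , audios=raw_speech , return_tensors="""np""" )
    return sorted(inputs.keys() )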
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 1_3
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 9_9
        self.hidden_size = 3_2
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 3_7
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_1_2
        self.type_vocab_size = 1_6
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFEsmModel(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """encoder_hidden_states""": encoder_hidden_states,
            """encoder_attention_mask""": encoder_attention_mask,
        }
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs , encoder_hidden_states=encoder_hidden_states )
        # Also check the case where encoder outputs are not passed
        result = model(input_ids , attention_mask=input_mask )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFEsmForMaskedLM(config=config )
        result = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFEsmModel,
"""fill-mask""": TFEsmForMaskedLM,
"""text-classification""": TFEsmForSequenceClassification,
"""token-classification""": TFEsmForTokenClassification,
"""zero-shot""": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFEsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
    def __UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def __UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def __UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def __UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip("""Protein models do not support embedding resizing.""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name , dict )
                for k, v in name.items():
                    assert isinstance(v , tf.Variable )
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase ):
@slow
def __UpperCamelCase ( self ):
        model = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 3_3]
        self.assertEqual(list(output.numpy().shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def __UpperCamelCase ( self ):
        model = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
        input_ids = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
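# Hedged sketch (an addition, not part of the original tests): the masked-LM
# call from the integration test above, condensed. Running it downloads the
# facebook/esm2_t6_8M_UR50D checkpoint.
def _demo_esm_masked_lm():
    model = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
    output = model(tf.constant([[0, 1, 2, 3, 4, 5]] ) )[0]
    return output.shape  # (1, 6, 33): one sequence, six tokens, vocab-size logits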
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class M2M100Tokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file , spm_file , src_lang=None , tgt_lang=None , bos_token="""<s>""" , eos_token="""</s>""" , sep_token="""</s>""" , pad_token="""<pad>""" , unk_token="""<unk>""" , language_codes="""m2m100""" , sp_model_kwargs = None , num_madeup_words=8 , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get("""additional_special_tokens""" , [] )
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code )
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code ) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang , tgt_lang=tgt_lang , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , unk_token=unk_token , pad_token=pad_token , language_codes=language_codes , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=num_madeup_words , **kwargs , )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        self.encoder_size = len(self.encoder )
        self.lang_token_to_id = {
            self.get_lang_token(lang_code ): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else """en"""
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size( self ):
        return len(self.encoder ) + len(self.lang_token_to_id )
    @property
    def src_lang( self ):
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ):
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token( self , index ):
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        save_dir = Path(save_directory )
        if not save_dir.is_dir():
            raise OSError(f'{save_directory} should be a directory' )
        vocab_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
        )
        spm_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
    def prepare_seq2seq_batch( self , src_texts , src_lang = "en" , tgt_texts = None , tgt_lang = "ro" , **kwargs , ):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _build_translation_inputs( self , raw_inputs , src_lang , tgt_lang , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , **extra_kwargs )
        tgt_lang_id = self.get_lang_id(tgt_lang )
        inputs["""forced_bos_token_id"""] = tgt_lang_id
        return inputs
    def _switch_to_input_mode( self ):
        self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ):
        lang_token = self.get_lang_token(src_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , tgt_lang ):
        lang_token = self.get_lang_token(tgt_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token( self , lang ):
        return self.lang_code_to_token[lang]
    def get_lang_id( self , lang ):
        lang_token = self.get_lang_token(lang )
        return self.lang_token_to_id[lang_token]
def load_spm(path: str , sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json(path: str ) -> Union[Dict, List]:
    with open(path , """r""" ) as f:
        return json.load(f )
def save_json(data , path: str ) -> None:
    with open(path , """w""" ) as f:
        json.dump(data , f , indent=2 )
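# Hedged usage sketch (an addition, not part of the original module): exercising
# the language-token plumbing above. Running it downloads the facebook/m2m100_418M
# vocab and SentencePiece files.
def _demo_m2m100_lang_tokens():
    tokenizer = M2M100Tokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""en""" , tgt_lang="""fr""" )
    ids = tokenizer("""Hello world""" ).input_ids
    # set_src_lang_special_tokens prefixes the __en__ token and suffixes </s>, so:
    assert ids[0] == tokenizer.get_lang_id("""en""" )
    assert ids[-1] == tokenizer.eos_token_id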
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
    max_seq_length: int = field(
        default=1_2_8 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    def __post_init__( self ):
        self.task_name = self.task_name.lower()
class Split(Enum ):
    train = """train"""
    dev = """dev"""
    test = """test"""
class GlueDataset(Dataset ):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , cache_dir = None , ):
        warnings.warn(
            """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
            """library. You can have a look at this example script for pointers: """
            """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , FutureWarning , )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""" )
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
        label_list = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file )
                logger.info(
                    f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
            else:
                logger.info(f'Creating features from dataset file at {args.data_dir}' )
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir )
                else:
                    examples = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
return len(self.features )
    def __getitem__( self , i ):
return self.features[i]
    def get_labels( self ):
return self.label_list
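# Hedged usage sketch (an addition, not part of the original module): building
# the (deprecated) dataset above. The data path is a placeholder; any GLUE task
# directory with the expected .tsv files works, and `tokenizer` is any
# PreTrainedTokenizer instance supplied by the caller.
def _demo_glue_dataset(tokenizer):
    args = GlueDataTrainingArguments(task_name="""mrpc""" , data_dir="""/path/to/MRPC""" , max_seq_length=1_2_8 )
    dataset = GlueDataset(args , tokenizer=tokenizer , mode=Split.dev )
    return dataset[0]  # an InputFeatures instance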
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable , y0: float , x0: float , step_size: float , x_end: float ) -> np.ndarray:
    # number of integration steps between x0 and x_end at the given step size
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y
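# Quick sanity check (an addition, not part of the original module): integrate
# dy/dx = y from x = 0 with y(0) = 1. Explicit Euler is first-order accurate,
# so with step 0.01 the endpoint should sit within about 0.02 of e.
def _demo_explicit_euler():
    y = explicit_euler(lambda x, y: y , 1.0 , 0.0 , 0.01 , 1.0 )
    assert abs(y[-1] - np.exp(1.0 ) ) < 0.02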
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="""gelu""" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1_0_0_0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFLayoutLMModel(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFLayoutLMForMaskedLM(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFLayoutLMForQuestionAnswering(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFLayoutLMModel,
"""fill-mask""": TFLayoutLMForMaskedLM,
"""text-classification""": TFLayoutLMForSequenceClassification,
"""token-classification""": TFLayoutLMForTokenClassification,
"""zero-shot""": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 1_0
    def setUp( self ):
        self.model_tester = TFLayoutLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMConfig , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
    def __UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def __UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def __UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def __UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def __UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    @slow
    def __UpperCamelCase ( self ):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip("""Onnx compliancy broke with TF 2.10""" )
def __UpperCamelCase ( self ):
pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase ):
@slow
def __UpperCamelCase ( self ):
        model = TFLayoutLMModel.from_pretrained("""microsoft/layoutlm-base-uncased""" )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids )
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-3 ) )
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , expected_slice , atol=1E-3 ) )
@slow
def __UpperCamelCase ( self ):
# initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=2 )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids , labels=tf.convert_to_tensor([1, 1] ) , )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape , expected_shape )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape , expected_shape )
@slow
def __UpperCamelCase ( self ):
# initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=1_3 )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids , labels=labels )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 2_5, 1_3) )
        self.assertEqual(logits.shape , expected_shape )
@slow
def __UpperCamelCase ( self ):
# initialize model with randomly initialized token classification head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("""microsoft/layoutlm-base-uncased""" )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids )
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 2_5) )
        self.assertEqual(outputs.start_logits.shape , expected_shape )
        self.assertEqual(outputs.end_logits.shape , expected_shape )
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def test_send_file_running_as_expected(file , sock ):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f )
    # ===== invoke =====
    send_file(filename="""mytext.txt""" , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use BeitImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
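# Hedged sketch (an addition): the replacement the deprecation warning points
# to. The checkpoint name is an example; any BEiT checkpoint works. Running it
# downloads the image-processor config.
def _demo_recommended_replacement():
    image_processor = BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" )
    return image_processor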
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__( self , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="""gelu""" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ViTMSNModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
        print(f'Labels: {labels}' )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = ViTMSNModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMSNConfig , has_text_modality=False , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def __UpperCamelCase ( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def __UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def __UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def __UpperCamelCase ( self ):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
        torch.manual_seed(2 )
        model = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
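# Hedged sketch (an addition, not part of the original tests): the integration
# flow above, condensed into a helper. Requires the facebook/vit-msn-small
# checkpoint and the local COCO test image used by prepare_img().
def _demo_vit_msn_inference():
    image_processor = ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" )
    model = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" )
    inputs = image_processor(images=prepare_img() , return_tensors="""pt""" )
    with torch.no_grad():
        logits = model(**inputs ).logits
    return logits.argmax(-1 ).item()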
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""height""", """width""", """image"""} | {"""image_latents"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"""image"""} | {"""image_latents"""}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Dict = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=snake_case_ , )
_lowerCAmelCase : Dict = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
_lowerCAmelCase : Optional[int] = DDIMInverseScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=snake_case_ , set_alpha_to_zero=snake_case_ , )
torch.manual_seed(0 )
_lowerCAmelCase : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
_lowerCAmelCase : Optional[int] = CLIPTextModel(snake_case_ )
_lowerCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __UpperCamelCase ( self , snake_case_ , snake_case_=0 ):
_lowerCAmelCase : int = floats_tensor((1, 1_6, 1_6) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_lowerCAmelCase : int = floats_tensor((1, 2, 4, 1_6, 1_6) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
if str(snake_case_ ).startswith("""mps""" ):
_lowerCAmelCase : Tuple = torch.manual_seed(snake_case_ )
else:
_lowerCAmelCase : List[str] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_lowerCAmelCase : str = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __UpperCamelCase ( self , snake_case_ , snake_case_=0 ):
_lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_lowerCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : Optional[Any] = Image.fromarray(np.uinta(snake_case_ ) ).convert("""RGB""" )
if str(snake_case_ ).startswith("""mps""" ):
_lowerCAmelCase : Dict = torch.manual_seed(snake_case_ )
else:
_lowerCAmelCase : str = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_lowerCAmelCase : int = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __UpperCamelCase ( self , snake_case_ , snake_case_=0 ):
_lowerCAmelCase : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_lowerCAmelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : str = Image.fromarray(np.uinta(snake_case_ ) ).convert("""RGB""" )
if str(snake_case_ ).startswith("""mps""" ):
_lowerCAmelCase : Any = torch.manual_seed(snake_case_ )
else:
_lowerCAmelCase : List[str] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_lowerCAmelCase : List[str] = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def __UpperCamelCase ( self ):
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(snake_case_ , snake_case_ , snake_case_ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
_lowerCAmelCase : str = self.get_dummy_inputs(snake_case_ )
_lowerCAmelCase : List[Any] = pipe(**snake_case_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(snake_case_ )
_lowerCAmelCase : List[str] = self.pipeline_class.from_pretrained(snake_case_ )
pipe_loaded.to(snake_case_ )
pipe_loaded.set_progress_bar_config(disable=snake_case_ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(snake_case_ , snake_case_ ) is None , f'`{optional_component}` did not stay set to None after loading.' , )
_lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(snake_case_ )
_lowerCAmelCase : List[Any] = pipe_loaded(**snake_case_ )[0]
_lowerCAmelCase : str = np.abs(output - output_loaded ).max()
self.assertLess(snake_case_ , 1E-4 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Dict = self.get_dummy_components()
_lowerCAmelCase : Union[str, Any] = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(snake_case_ )
_lowerCAmelCase : int = pipe.generate_mask(**snake_case_ )
_lowerCAmelCase : Optional[Any] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 1_6, 1_6) )
_lowerCAmelCase : str = np.array([0] * 9 )
_lowerCAmelCase : Any = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case_ , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = """cpu"""
_lowerCAmelCase : Optional[Any] = self.get_dummy_components()
_lowerCAmelCase : Dict = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : Tuple = self.get_dummy_inversion_inputs(snake_case_ )
_lowerCAmelCase : List[Any] = pipe.invert(**snake_case_ ).images
_lowerCAmelCase : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 3_2, 3_2, 3) )
_lowerCAmelCase : Optional[int] = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799] , )
_lowerCAmelCase : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case_ , 1E-3 )
def __UpperCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = """cpu"""
_lowerCAmelCase : str = self.get_dummy_components()
_lowerCAmelCase : Tuple = {"""beta_start""": 0.0_0085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
_lowerCAmelCase : List[Any] = DPMSolverMultistepScheduler(**snake_case_ )
_lowerCAmelCase : Tuple = DPMSolverMultistepInverseScheduler(**snake_case_ )
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inversion_inputs(snake_case_ )
_lowerCAmelCase : Union[str, Any] = pipe.invert(**snake_case_ ).images
_lowerCAmelCase : Optional[int] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 3_2, 3_2, 3) )
_lowerCAmelCase : Tuple = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799] , )
_lowerCAmelCase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case_ , 1E-3 )
@require_torch_gpu
@slow
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __UpperCamelCase ( cls ):
_lowerCAmelCase : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
_lowerCAmelCase : int = raw_image.convert("""RGB""" ).resize((7_6_8, 7_6_8) )
_lowerCAmelCase : Optional[Any] = raw_image
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = torch.manual_seed(0 )
_lowerCAmelCase : str = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=snake_case_ , torch_dtype=torch.floataa )
_lowerCAmelCase : Union[str, Any] = DDIMScheduler.from_config(pipe.scheduler.config )
_lowerCAmelCase : str = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : int = """a bowl of fruit"""
_lowerCAmelCase : Optional[Any] = """a bowl of pears"""
_lowerCAmelCase : Optional[int] = pipe.generate_mask(
image=self.raw_image , source_prompt=snake_case_ , target_prompt=snake_case_ , generator=snake_case_ , )
_lowerCAmelCase : Optional[int] = pipe.invert(
prompt=snake_case_ , image=self.raw_image , inpaint_strength=0.7 , generator=snake_case_ ).latents
_lowerCAmelCase : Optional[int] = pipe(
prompt=snake_case_ , mask_image=snake_case_ , image_latents=snake_case_ , generator=snake_case_ , negative_prompt=snake_case_ , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
_lowerCAmelCase : int = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5E-1
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=snake_case_ , torch_dtype=torch.floataa )
_lowerCAmelCase : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_lowerCAmelCase : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : int = """a bowl of fruit"""
_lowerCAmelCase : List[str] = """a bowl of pears"""
_lowerCAmelCase : List[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=snake_case_ , target_prompt=snake_case_ , generator=snake_case_ , )
_lowerCAmelCase : Union[str, Any] = pipe.invert(
prompt=snake_case_ , image=self.raw_image , inpaint_strength=0.7 , generator=snake_case_ , num_inference_steps=2_5 , ).latents
_lowerCAmelCase : List[str] = pipe(
prompt=snake_case_ , mask_image=snake_case_ , image_latents=snake_case_ , generator=snake_case_ , negative_prompt=snake_case_ , inpaint_strength=0.7 , num_inference_steps=2_5 , output_type="""numpy""" , ).images[0]
_lowerCAmelCase : List[str] = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5E-1
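# Editor's summary sketch of the three-stage DiffEdit flow exercised by the
# tests above (hedged: argument names are taken from the calls in this file,
# not independently verified against the diffusers documentation):
#
#   mask_image = pipe.generate_mask(image=raw_image, source_prompt=source_prompt, target_prompt=target_prompt)
#   image_latents = pipe.invert(prompt=source_prompt, image=raw_image, inpaint_strength=0.7).latents
#   edited = pipe(prompt=target_prompt, mask_image=mask_image, image_latents=image_latents, inpaint_strength=0.7).images[0]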
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class a_ (_a ):
__lowerCAmelCase : List[Any] = """microsoft/speecht5_tts"""
__lowerCAmelCase : List[Any] = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
__lowerCAmelCase : List[str] = """text_reader"""
__lowerCAmelCase : Optional[Any] = SpeechTaProcessor
__lowerCAmelCase : str = SpeechTaForTextToSpeech
__lowerCAmelCase : int = SpeechTaHifiGan
__lowerCAmelCase : int = ["""text"""]
__lowerCAmelCase : int = ["""audio"""]
def __UpperCamelCase ( self ):
if self.post_processor is None:
_lowerCAmelCase : int = """microsoft/speecht5_hifigan"""
super().setup()
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : Tuple = self.pre_processor(text=snake_case_ , return_tensors="""pt""" , truncation=snake_case_ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
_lowerCAmelCase : List[str] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
_lowerCAmelCase : Any = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.model.generate_speech(**snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.post_processor(snake_case_ ).cpu().detach()
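# Hedged usage sketch (editor addition). Agent tools of this kind are normally
# loaded through `load_tool`; the task id "text-to-speech" is an assumption
# based on this tool's description, not verified against the tool registry.
#
#   from transformers import load_tool
#   reader = load_tool("text-to-speech")
#   waveform = reader("This is a test.")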
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(digit_len: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
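# Illustrative sanity check (editor addition): 49/98 is the best-known
# digit-cancelling fraction, since naively "cancelling" the 9s gives 4/8,
# which happens to equal 49/98.
if __name__ == "__main__":
    assert is_digit_cancelling(49, 98)
    assert "49/98" in fraction_list(2)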
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
_lowerCAmelCase : int = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("""RGB""" )
return image
def _UpperCAmelCase ( _lowerCamelCase : Any ) -> Dict:
_lowerCAmelCase : str = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] ) -> Optional[Any]:
_lowerCAmelCase : str = dct.pop(_lowerCamelCase )
_lowerCAmelCase : str = val
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ) -> Tuple:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_lowerCAmelCase : Tuple = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
_lowerCAmelCase : Optional[Any] = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
_lowerCAmelCase : int = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
_lowerCAmelCase : str = qkv_bias
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] ) -> List[Any]:
_lowerCAmelCase : str = 3_64 if """coco""" in model_name else 2_24
_lowerCAmelCase : str = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_lowerCAmelCase : int = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
_lowerCAmelCase : Union[str, Any] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
_lowerCAmelCase : Optional[int] = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_lowerCAmelCase : str = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
_lowerCAmelCase : Dict = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any]=None , _lowerCamelCase : int=False ) -> List[str]:
_lowerCAmelCase : int = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_lowerCAmelCase : List[Any] = tokenizer("""\n""" , add_special_tokens=_lowerCamelCase ).input_ids[0]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
_lowerCAmelCase : Union[str, Any] = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_lowerCAmelCase , _lowerCAmelCase : List[str] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_lowerCAmelCase : Dict = """cuda""" if torch.cuda.is_available() else """cpu"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
_lowerCAmelCase : List[Any] = original_model.state_dict()
_lowerCAmelCase : Optional[int] = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_lowerCAmelCase : Tuple = state_dict.pop(_lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
_lowerCAmelCase : List[Any] = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_lowerCAmelCase : Optional[int] = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_lowerCAmelCase : Dict = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_lowerCAmelCase : Tuple = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_lowerCAmelCase : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_lowerCAmelCase : int = key.replace("""t5""" , """language""" )
_lowerCAmelCase : Tuple = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_lowerCAmelCase : Union[str, Any] = load_demo_image()
_lowerCAmelCase : Optional[int] = vis_processors["""eval"""](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
_lowerCAmelCase : List[str] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
# create processor
_lowerCAmelCase : Optional[int] = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
_lowerCAmelCase : Tuple = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
_lowerCAmelCase : Any = processor(images=_lowerCamelCase , return_tensors="""pt""" ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
_lowerCAmelCase : Optional[Any] = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_lowerCAmelCase : Optional[Any] = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
_lowerCAmelCase : List[Any] = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_lowerCAmelCase : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_lowerCAmelCase : Dict = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_lowerCAmelCase : Any = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_lowerCAmelCase : List[Any] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
_lowerCAmelCase : Union[str, Any] = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
_lowerCAmelCase : List[Any] = original_model.generate({"""image""": original_pixel_values} )
_lowerCAmelCase : Dict = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , _lowerCamelCase )
_lowerCAmelCase : int = input_ids.shape[1]
_lowerCAmelCase : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
_lowerCAmelCase : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'nielsr/{model_name}' )
hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
UpperCamelCase_ = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
UpperCamelCase_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
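# Example invocation (editor addition; the script filename is illustrative):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b \
#       --push_to_hub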
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How many images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker so the pipeline returns images unchanged.
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
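# Example invocation (editor addition; the script filename is illustrative,
# and -m must point at a directory with diffusers-style subfolders, optionally
# containing an INT8 `best_model.pt` produced by Intel Neural Compressor):
#
#   python text2images.py -m ./textual_inversion_output -c "robotic cat with wings" -n 4 -s 42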
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase_ = """src/diffusers"""
# Pattern that looks at the indentation in a line.
UpperCamelCase_ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase_ = re.compile(r"""\[([^\]]+)\]""")
def _UpperCAmelCase ( _lowerCamelCase : List[Any] ) -> str:
_lowerCAmelCase : Dict = _re_indent.search(_lowerCamelCase )
return "" if search is None else search.groups()[0]
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str]="" , _lowerCamelCase : str=None , _lowerCamelCase : List[Any]=None ) -> str:
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase ):
index += 1
_lowerCAmelCase : List[Any] = ["""\n""".join(lines[:index] )]
else:
_lowerCAmelCase : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(_lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCamelCase ) )
if index < len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Dict = []
else:
blocks.append("""\n""".join(_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase ) > 0:
blocks.append("""\n""".join(_lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] ) -> Any:
def _inner(_lowerCamelCase : Any ):
return key(_lowerCamelCase ).lower().replace("""_""" , """""" )
return _inner
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple=None ) -> Union[str, Any]:
# If no key is provided, we use a noop.
    def noop(x):
        return x
if key is None:
_lowerCAmelCase : Union[str, Any] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : Any = [obj for obj in objects if key(_lowerCamelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Union[str, Any] = [obj for obj in objects if key(_lowerCamelCase )[0].isupper() and not key(_lowerCamelCase ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : Optional[Any] = [obj for obj in objects if not key(_lowerCamelCase )[0].isupper()]
_lowerCAmelCase : List[str] = ignore_underscore(_lowerCamelCase )
return sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : str ) -> str:
# This inner function sort imports between [ ].
def _replace(_lowerCamelCase : Union[str, Any] ):
_lowerCAmelCase : Optional[Any] = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_lowerCAmelCase : List[str] = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_lowerCamelCase )] ) + "]"
_lowerCAmelCase : Optional[int] = import_statement.split("""\n""" )
if len(_lowerCamelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Dict = 2 if lines[1].strip() == """[""" else 1
_lowerCAmelCase : Tuple = [(i, _re_strip_line.search(_lowerCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        _lowerCAmelCase : Tuple = sort_objects(_lowerCamelCase , key=lambda x : x[1] )
_lowerCAmelCase : Optional[Any] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_lowerCamelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : str = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Tuple = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : Dict = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + """, """.join([f'"{k}"' for k in sort_objects(_lowerCamelCase )] )
return "\n".join(_lowerCamelCase )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Dict = _re_bracket_content.sub(_replace , _lowerCamelCase )
return import_statement
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True ) -> List[str]:
with open(_lowerCamelCase , """r""" ) as f:
_lowerCAmelCase : Optional[Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(
_lowerCamelCase , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_lowerCamelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase : List[str] = main_blocks[block_idx]
_lowerCAmelCase : int = block.split("""\n""" )
# Get to the start of the imports.
_lowerCAmelCase : Any = 0
while line_idx < len(_lowerCamelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
else:
line_idx += 1
if line_idx >= len(_lowerCamelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : Any = """\n""".join(block_lines[line_idx:-1] )
_lowerCAmelCase : Tuple = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
_lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(_lowerCamelCase , indent_level=_lowerCamelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : List[Any] = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : Tuple = [(pattern.search(_lowerCamelCase ).groups()[0] if pattern.search(_lowerCamelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : List[str] = [(i, key) for i, key in enumerate(_lowerCamelCase ) if key is not None]
        _lowerCAmelCase : List[str] = [x[0] for x in sorted(_lowerCamelCase , key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : List[str] = []
for i in range(len(_lowerCamelCase ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : Any = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(_lowerCamelCase )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase : str = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_lowerCamelCase ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(_lowerCamelCase , """w""" ) as f:
f.write("""\n""".join(_lowerCamelCase ) )
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any]=True ) -> Any:
_lowerCAmelCase : List[Any] = []
for root, _, files in os.walk(_lowerCamelCase ):
if "__init__.py" in files:
_lowerCAmelCase : List[Any] = sort_imports(os.path.join(_lowerCamelCase , """__init__.py""" ) , check_only=_lowerCamelCase )
if result:
_lowerCAmelCase : str = [os.path.join(_lowerCamelCase , """__init__.py""" )]
if len(_lowerCamelCase ) > 0:
raise ValueError(f'Would overwrite {len(_lowerCamelCase )} files, run `make style`.' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
UpperCamelCase_ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
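# Example invocation (editor addition; in the diffusers repo this script lives
# at utils/custom_init_isort.py, which is an assumption worth verifying):
#
#   python utils/custom_init_isort.py --check_only   # raise if any __init__.py needs sorting
#   python utils/custom_init_isort.py                # rewrite the files in place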
'''simple docstring'''
from timeit import timeit
UpperCamelCase_ = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f'all({name}(key) is value for key, value in test_data.items())'
    setup = f'from __main__ import test_data, {name}'
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f'{name:<35} finished {number:,} runs in {result:.5f} seconds')
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F'{key:21} {value}')
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCamelCase_ = logging.get_logger(__name__)
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = question_encoder
_lowerCAmelCase : Optional[Any] = generator
_lowerCAmelCase : Optional[Any] = self.question_encoder
def __UpperCamelCase ( self , snake_case_ ):
if os.path.isfile(snake_case_ ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
_lowerCAmelCase : Any = os.path.join(snake_case_ , """question_encoder_tokenizer""" )
_lowerCAmelCase : Tuple = os.path.join(snake_case_ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(snake_case_ )
self.generator.save_pretrained(snake_case_ )
@classmethod
def __UpperCamelCase ( cls , snake_case_ , **snake_case_ ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_lowerCAmelCase : Dict = kwargs.pop("""config""" , snake_case_ )
if config is None:
_lowerCAmelCase : List[Any] = RagConfig.from_pretrained(snake_case_ )
_lowerCAmelCase : int = AutoTokenizer.from_pretrained(
snake_case_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
snake_case_ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=snake_case_ , generator=snake_case_ )
def __call__( self , *snake_case_ , **snake_case_ ):
return self.current_tokenizer(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.batch_decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.question_encoder
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.generator
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = "longest" , snake_case_ = None , snake_case_ = True , **snake_case_ , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , snake_case_ , )
if max_length is None:
_lowerCAmelCase : Any = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[Any] = self(
snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , max_length=snake_case_ , padding=snake_case_ , truncation=snake_case_ , **snake_case_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_lowerCAmelCase : List[str] = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[str] = self(
text_target=snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , padding=snake_case_ , max_length=snake_case_ , truncation=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Dict = labels["""input_ids"""]
return model_inputs
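# Hedged usage sketch (editor addition): upstream this class is RagTokenizer,
# and it is typically instantiated from a RAG checkpoint; the repo id below is
# illustrative.
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")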
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class a_ (_a ):
__lowerCAmelCase : Any = (KDPMaDiscreteScheduler,)
__lowerCAmelCase : Tuple = 1_0
def __UpperCamelCase ( self , **snake_case_ ):
_lowerCAmelCase : Union[str, Any] = {
"""num_train_timesteps""": 1_1_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**snake_case_ )
return config
def __UpperCamelCase ( self ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def __UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case_ , beta_end=snake_case_ )
def __UpperCamelCase ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case_ )
def __UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase : List[str] = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : Optional[Any] = self.dummy_model()
_lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : str = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Tuple = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Union[str, Any] = output.prev_sample
_lowerCAmelCase : Dict = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Optional[Any] = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
            assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
            assert abs(result_sum.item() - 4.693428650170972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
def __UpperCamelCase ( self ):
if torch_device == "mps":
return
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config()
_lowerCAmelCase : int = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : List[str] = self.dummy_model()
_lowerCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : Tuple = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : int = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[Any] = output.prev_sample
_lowerCAmelCase : Dict = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
def __UpperCamelCase ( self ):
if torch_device == "mps":
return
_lowerCAmelCase : List[str] = self.scheduler_classes[0]
_lowerCAmelCase : Tuple = self.get_scheduler_config()
_lowerCAmelCase : Any = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case_ )
_lowerCAmelCase : Optional[Any] = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter.to(snake_case_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_lowerCAmelCase : Optional[Any] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Tuple = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Union[str, Any] = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : List[Any] = output.prev_sample
_lowerCAmelCase : List[str] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(snake_case_ ) )
if str(snake_case_ ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
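# Hedged usage sketch (editor addition): in application code this scheduler is
# usually swapped into an existing pipeline via `from_config`; upstream in
# diffusers the class is named KDPM2DiscreteScheduler.
#
#   from diffusers import DiffusionPipeline, KDPM2DiscreteScheduler
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = KDPM2DiscreteScheduler.from_config(pipe.scheduler.config)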
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a: list) -> None:
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))
if __name__ == "__main__":
main()
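# Quick property check (editor addition): pigeonhole sort must agree with the
# built-in sorted() for any list of (small-range) integers, including
# negatives and duplicates.
if __name__ == "__main__":
    import random

    data = [random.randint(-10, 10) for _ in range(50)]
    expected = sorted(data)
    pigeonhole_sort(data)
    assert data == expected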
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
                ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
_lowerCAmelCase : Optional[int] = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
_lowerCAmelCase : Optional[Any] = tf.convert_to_tensor(
[8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above
_lowerCAmelCase : str = tf_top_k_top_p_filtering(snake_case_ , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
_lowerCAmelCase : Optional[Any] = output[output != -float("""inf""" )]
_lowerCAmelCase : int = tf.cast(
tf.where(tf.not_equal(snake_case_ , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(snake_case_ , snake_case_ , rtol=1E-12 )
tf.debugging.assert_equal(snake_case_ , snake_case_ )
@require_tf
class a_ (unittest.TestCase , _a ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
__lowerCAmelCase : Union[str, Any] = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
def test_generate_tf_function_export_fixed_input_length( self ):
# TF-only test: tf.saved_model export
test_model = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
input_length = 2
max_new_tokens = 2
class DummyModel(tf.Module ):
def __init__( self , model ):
super(DummyModel , self ).__init__()
self.model = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.int32 , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.int32 , name="""attention_mask""" ),
) , jit_compile=True , )
def serving( self , input_ids , attention_mask ):
outputs = self.model.generate(
input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
return {"sequences": outputs["sequences"]}
dummy_input_ids = [[2, 0], [1_0_2, 1_0_3]]
dummy_attention_masks = [[1, 0], [1, 1]]
dummy_model = DummyModel(model=test_model )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(dummy_model , tmp_dir , signatures={"""serving_default""": dummy_model.serving} )
serving_func = tf.saved_model.load(tmp_dir ).signatures["""serving_default"""]
for batch_size in range(1 , len(dummy_input_ids ) + 1 ):
inputs = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
tf_func_outputs = serving_func(**inputs )["""sequences"""]
tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
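# Illustrative note (not part of the original test): the signature above fixes the sequence
# length to input_length while leaving the batch dimension dynamic (None); the next test
# inverts this, fixing the batch size and leaving the length dynamic.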
@slow
def test_generate_tf_function_export_fixed_batch_size( self ):
# TF-only test: tf.saved_model export
test_model = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
batch_size = 1
max_new_tokens = 2
class DummyModel(tf.Module ):
def __init__( self , model ):
super(DummyModel , self ).__init__()
self.model = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.int32 , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.int32 , name="""attention_mask""" ),
) , jit_compile=True , )
def serving( self , input_ids , attention_mask ):
outputs = self.model.generate(
input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
return {"sequences": outputs["sequences"]}
dummy_input_ids = [[2], [1_0_2, 1_0_3]]
dummy_attention_masks = [[1], [1, 1]]
dummy_model = DummyModel(model=test_model )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(dummy_model , tmp_dir , signatures={"""serving_default""": dummy_model.serving} )
serving_func = tf.saved_model.load(tmp_dir ).signatures["""serving_default"""]
for input_row in range(len(dummy_input_ids ) ):
inputs = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
tf_func_outputs = serving_func(**inputs )["""sequences"""]
tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
@require_tensorflow_text
def test_generate_tf_function_export_with_tf_tokenizer( self ):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=tmp_dir )
class CompleteSentenceTransformer(tf.keras.layers.Layer ):
def __init__( self ):
super().__init__()
self.tokenizer = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(tmp_dir , """spiece.model""" ) , """rb""" ).read() )
self.model = TFAutoModelForSeq2SeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def call( self , inputs , *args , **kwargs ):
tokens = self.tokenizer.tokenize(inputs )
input_ids , attention_mask = text.pad_model_inputs(
tokens , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
outputs = self.model.generate(input_ids=input_ids , attention_mask=attention_mask )
return self.tokenizer.detokenize(outputs )
complete_model = CompleteSentenceTransformer()
inputs = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
outputs = complete_model(inputs )
keras_model = tf.keras.Model(inputs , outputs )
keras_model.save(tmp_dir )
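# Illustrative note (not part of the original test): the saved artifact can be reloaded
# generically with tf.saved_model.load(tmp_dir); reloading through Keras would require
# registering CompleteSentenceTransformer as a custom object.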
def test_eos_token_id_int_and_list_top_k_top_sampling( self ):
# Has PT equivalent: this test relies on random sampling
generation_kwargs = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 1_0,
"""temperature""": 0.7,
}
expectation = 1_4
tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
text = """Hello, my dog is cute and"""
tokens = tokenizer(text , return_tensors="""tf""" )
model = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
eos_token_id = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
generated_tokens = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs )
self.assertTrue(expectation == len(generated_tokens[0] ) )
eos_token_id = [6_3_8, 1_9_8]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
generated_tokens = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def test_model_kwarg_encoder_signature_filtering( self ):
# Has PT equivalent: ample use of framework-specific code
bart_tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
article = """Hugging Face is a technology company based in New York and Paris."""
input_ids = bart_tokenizer(article , return_tensors="""tf""" ).input_ids
bart_model = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
output = bart_model.generate(input_ids ).numpy()
# A fake model whose call() accepts "foo"; the kwarg must be filtered out before reaching the encoder
class FakeBart(TFBartForConditionalGeneration ):
def call( self , input_ids , foo=None , **kwargs ):
return super().call(input_ids , **kwargs )
bart_model = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
fake_output = bart_model.generate(input_ids , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(output , fake_output ) )
class FakeEncoder(bart_model.model.encoder.__class__ ):
def call( self , input_ids , **kwargs ):
return super().call(input_ids , **kwargs )
fake_encoder = FakeEncoder(bart_model.config , bart_model.model.shared )
bart_model.model.encoder = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
fake_output = bart_model.generate(input_ids ).numpy()
with self.assertRaises(ValueError ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(input_ids , foo="""bar""" )
| 309
|
'''simple docstring'''
def solution( n : int = 1_00 ) -> int:
# difference between the square of the sum and the sum of the squares of the first n natural numbers
sum_cubes = (n * (n + 1) // 2) ** 2
sum_squares = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
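# Illustrative sanity check (not part of the original solution): brute force for small n.
def brute_force_solution( n : int = 1_00 ) -> int:
return sum(range(1 , n + 1 ) ) ** 2 - sum(i * i for i in range(1 , n + 1 ) )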
if __name__ == "__main__":
print(F'{solution() = }')
| 309
| 1
|
'''simple docstring'''
def or_gate( input_1 : int , input_2 : int ) -> int:
return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
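# Illustrative equivalent (not part of the original): the same gate can be written as
# int(bool(input_1) or bool(input_2)).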
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_focalnet"""] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/reformer-crime-and-punishment""": (
"""https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/reformer-crime-and-punishment""": 52_42_88,
}
class ReformerTokenizer(PreTrainedTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , vocab_file , eos_token="</s>" , unk_token="<unk>" , additional_special_tokens=[] , sp_model_kwargs = None , **kwargs , ):
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=eos_token , unk_token=unk_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(vocab_file )
@property
def vocab_size( self ):
return self.sp_model.get_piece_size()
def get_vocab( self ):
vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
state = self.__dict__.copy()
# the SentencePiece processor is not picklable, so it is dropped and rebuilt in __setstate__
state["""sp_model"""] = None
return state
def __setstate__( self , d ):
self.__dict__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _tokenize( self , text ):
return self.sp_model.encode(text , out_type=str )
def _convert_token_to_id( self , token ):
return self.sp_model.piece_to_id(token )
def _convert_id_to_token( self , index ):
if index < self.sp_model.get_piece_size():
token = self.sp_model.IdToPiece(index )
return token
def convert_tokens_to_string( self , tokens ):
current_sub_tokens = []
out_string = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(current_sub_tokens ) + token
current_sub_tokens = []
else:
current_sub_tokens.append(token )
out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def save_vocabulary( self , save_directory , filename_prefix = None ):
if not os.path.isdir(save_directory ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file , """wb""" ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCamelCase_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""DPTFeatureExtractor"""]
UpperCamelCase_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_dpt"""] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline ):
def __init__( self , segmentation_model , segmentation_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
super().__init__()
if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
deprecation_message = (
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
"""to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
""" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
""" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
""" file"""
)
deprecate("""steps_offset!=1""" , """1.0.0""" , snake_case_ , standard_warn=snake_case_ )
_lowerCAmelCase : Optional[Any] = dict(scheduler.config )
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : List[str] = FrozenDict(snake_case_ )
if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
deprecation_message = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
""" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
""" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
""" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
""" Hub, it would be very nice if you could open a Pull request for the"""
""" `scheduler/scheduler_config.json` file"""
)
deprecate("""skip_prk_steps not set""" , """1.0.0""" , snake_case_ , standard_warn=snake_case_ )
_lowerCAmelCase : Optional[int] = dict(scheduler.config )
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : List[Any] = FrozenDict(snake_case_ )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def __UpperCamelCase ( self , snake_case_ = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase : Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case_ )
def __UpperCamelCase ( self ):
self.enable_attention_slicing(snake_case_ )
def enable_sequential_cpu_offload( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
device = torch.device("""cuda""" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _execution_device( self ):
if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(module , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , prompt , image , text , height = 5_1_2 , width = 5_1_2 , num_inference_steps = 5_0 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
# Segment the region described by `text` with CLIPSeg to obtain the inpainting mask
inputs = self.segmentation_processor(
text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
outputs = self.segmentation_model(**inputs )
mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
inpainting_pipeline = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
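# Illustrative usage (not part of the original file; assumes compatible checkpoints for
# every component):
# pipe = TextInpainting(segmentation_model=seg_model , segmentation_processor=seg_processor ,
# vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler ,
# safety_checker=safety_checker , feature_extractor=feature_extractor )
# result = pipe(prompt="""a red couch""" , image=init_image , text="""the sofa""" )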
| 309
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu( vector : list[float] ) -> np.ndarray:
return np.maximum(0 , vector )
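# Illustrative note (not part of the original): ReLU computes max(0, x) element-wise, so
# its (sub)gradient is 0 for x < 0 and 1 for x > 0.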
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 309
| 1
|
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def config( *args , **kwargs ):
return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer( *args , **kwargs ):
return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model( *args , **kwargs ):
return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM( *args , **kwargs ):
return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM( *args , **kwargs ):
return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification( *args , **kwargs ):
return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering( *args , **kwargs ):
return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
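# Illustrative torch.hub usage (not part of the original file; the repo path and model
# name are assumptions):
# model = torch.hub.load("""huggingface/transformers""" , """model""" , """bert-base-uncased""" )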
| 309
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor ):
def __init__( self , *args , **kwargs ):
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , FutureWarning , )
super().__init__(*args , **kwargs )
| 309
| 1
|
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImg2ImgPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
pipeline_class = StableDiffusionControlNetImg2ImgPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} )
image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components( self ):
torch.manual_seed(0 )
unet = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
controlnet = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
torch.manual_seed(0 )
scheduler = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
components = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
if str(device ).startswith("""mps""" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
controlnet_embedder_scale_factor = 2
control_image = randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((6_4, 6_4) )
inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def test_attention_slicing_forward_pass( self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def test_xformers_attention_forwardGenerator_pass( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def test_inference_batch_single_identical( self ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class StableDiffusionMultiControlNetPipelineFastTests(PipelineTesterMixin , PipelineLatentTesterMixin , unittest.TestCase ):
pipeline_class = StableDiffusionControlNetImg2ImgPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
image_params = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def get_dummy_components( self ):
torch.manual_seed(0 )
unet = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
def init_weights(m ):
if isinstance(m , torch.nn.Conv2d ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
controlnet1 = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlnet1.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
controlnet2 = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlnet2.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
scheduler = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
controlnet = MultiControlNetModel([controlnet1, controlnet2] )
components = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
if str(device ).startswith("""mps""" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
controlnet_embedder_scale_factor = 2
control_image = [
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
]
image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((6_4, 6_4) )
inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def test_control_guidance_switch( self ):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe.to(torch_device )
scale = 10.0
steps = 4
inputs = self.get_dummy_inputs(torch_device )
inputs["""num_inference_steps"""] = steps
inputs["""controlnet_conditioning_scale"""] = scale
output_1 = pipe(**inputs )[0]
inputs = self.get_dummy_inputs(torch_device )
inputs["""num_inference_steps"""] = steps
inputs["""controlnet_conditioning_scale"""] = scale
output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
inputs = self.get_dummy_inputs(torch_device )
inputs["""num_inference_steps"""] = steps
inputs["""controlnet_conditioning_scale"""] = scale
output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
inputs = self.get_dummy_inputs(torch_device )
inputs["""num_inference_steps"""] = steps
inputs["""controlnet_conditioning_scale"""] = scale
output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_1 - output_2 ) ) > 1E-3
assert np.sum(np.abs(output_1 - output_3 ) ) > 1E-3
assert np.sum(np.abs(output_1 - output_4 ) ) > 1E-3
def test_attention_slicing_forward_pass( self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def test_xformers_attention_forwardGenerator_pass( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def test_inference_batch_single_identical( self ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def test_save_pretrained_raise_not_implemented_exception( self ):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(tmpdir )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase ):
def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_canny( self ):
controlnet = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=None , controlnet=controlnet )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None )
generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
prompt = """evil space-punk bird"""
control_image = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((5_1_2, 5_1_2) )
image = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((5_1_2, 5_1_2) )
output = pipe(
prompt , image , control_image=control_image , generator=generator , output_type="""np""" , num_inference_steps=5_0 , strength=0.6 , )
image = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
expected_image = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9E-2
| 309
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest ):
scheduler_classes = (DPMSolverSDEScheduler,)
num_inference_steps = 1_0
def get_scheduler_config( self , **kwargs ):
config = {
"""num_train_timesteps""": 1_1_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**kwargs )
return config
def test_timesteps( self ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=timesteps )
def test_betas( self ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def test_schedules( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=schedule )
def test_prediction_type( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type )
def test_full_loop_no_noise( self ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(self.num_inference_steps )
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device )
for i, t in enumerate(scheduler.timesteps ):
sample = scheduler.scale_model_input(sample , t )
model_output = model(sample , t )
output = scheduler.step(model_output , t , sample )
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def test_full_loop_with_v_prediction( self ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(self.num_inference_steps )
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device )
for i, t in enumerate(scheduler.timesteps ):
sample = scheduler.scale_model_input(sample , t )
model_output = model(sample , t )
output = scheduler.step(model_output , t , sample )
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
def test_full_loop_device( self ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample , t )
model_output = model(sample , t )
output = scheduler.step(model_output , t , sample )
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def test_full_loop_device_karras_sigmas( self ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
sample = sample.to(torch_device )
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample , t )
model_output = model(sample , t )
output = scheduler.step(model_output , t , sample )
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
| 309
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_megatron_bert"""] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = ConvBertTokenizer
def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
# rebuild the backend normalizer if its options differ from the requested ones
if (
normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
):
normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
normalizer_state["""lowercase"""] = do_lower_case
normalizer_state["""strip_accents"""] = strip_accents
normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def save_vocabulary( self , save_directory , filename_prefix = None ):
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
| 309
| 1
|
'''simple docstring'''
from PIL import Image
def change_brightness( img : Image , level : float ) -> Image:
def brightness( c : int ) -> float:
return 1_28 + level + (c - 1_28)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(brightness )
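# Illustrative note (not part of the original): for 8-bit images, Image.point() applies the
# function above through a 256-entry lookup table, one entry per possible pixel value.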
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
brigt_img = change_brightness(img, 1_00)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 309
|
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
def __init__( self ):
self.img = """"""
self.original_image = """"""
self.last_list = []
self.rem = 0
self.L = 2_5_6
self.sk = 0
self.k = 0
self.number_of_rows = 0
self.number_of_cols = 0
def stretch( self , input_image ):
self.img = cv2.imread(input_image , 0 )
self.original_image = copy.deepcopy(self.img )
x , _ , _ = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="""x""" )
self.k = np.sum(x )
for i in range(len(x ) ):
prk = x[i] / self.k
self.sk += prk
last = (self.L - 1) * self.sk
if self.rem != 0:
self.rem = int(last % last )
last = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(last )
self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
self.number_of_cols = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
num = self.img[j][i]
if num != self.last_list[num]:
self.img[j][i] = self.last_list[num]
cv2.imwrite("""output_data/output.jpg""" , self.img )
def plot_histogram( self ):
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def show_image( self ):
cv2.imshow("""Output-Image""" , self.img )
cv2.imshow("""Input-Image""" , self.original_image )
cv2.waitKey(5_0_0_0 )
cv2.destroyAllWindows()
if __name__ == "__main__":
file_path = os.path.join(os.path.dirname(__file__), """image_data/input.jpg""")
stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor ):
model_input_names = ["""pixel_values"""]
def __init__( self , do_resize = True , size = None , crop_pct = None , resample = PILImageResampling.BILINEAR , do_rescale = True , rescale_factor = 1 / 2_5_5 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
super().__init__(**kwargs )
size = size if size is not None else {"""shortest_edge""": 3_8_4}
size = get_size_dict(size , default_to_square=False )
self.do_resize = do_resize
self.size = size
# Default value set here for backwards compatibility where the value in config is None
self.crop_pct = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def resize( self , image , size , crop_pct , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
size = get_size_dict(size , default_to_square=False )
if "shortest_edge" not in size:
raise ValueError(f'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
shortest_edge = size["""shortest_edge"""]
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
resize_shortest_edge = int(shortest_edge / crop_pct )
resize_size = get_resize_output_image_size(image , size=resize_shortest_edge , default_to_square=False )
image = resize(image=image , size=resize_size , resample=resample , data_format=data_format , **kwargs )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=image , size=(shortest_edge, shortest_edge) , data_format=data_format , **kwargs )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
image , size=(shortest_edge, shortest_edge) , resample=resample , data_format=data_format , **kwargs )
def rescale( self , image , scale , data_format = None , **kwargs , ):
return rescale(image , scale=scale , data_format=data_format , **kwargs )
def normalize( self , image , mean , std , data_format = None , **kwargs , ):
return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
def preprocess( self , images , do_resize = None , size = None , crop_pct = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
do_resize = do_resize if do_resize is not None else self.do_resize
crop_pct = crop_pct if crop_pct is not None else self.crop_pct
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
size = size if size is not None else self.size
size = get_size_dict(size , default_to_square=False )
images = make_list_of_images(images )
if not valid_images(images ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
images = [to_numpy_array(image ) for image in images]
if do_resize:
images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]
if do_rescale:
images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
if do_normalize:
images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
images = [to_channel_dimension_format(image , data_format ) for image in images]
data = {"""pixel_values""": images}
return BatchFeature(data=data , tensor_type=return_tensors )
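# Illustrative usage (not part of the original file; the size is an example):
# processor = ConvNextImageProcessor(size={"""shortest_edge""": 3_8_4} )
# batch = processor(images=pil_image , return_tensors="""pt""" )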
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
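# Hedged usage note: inside the real `transformers` package this lazy-module setup lets
# `from transformers import GroupViTModel` defer the heavy torch/TF imports until first access.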
| 309
| 1
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase_ = False
class a_ (unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_lowerCAmelCase : int = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe.dual_guided(
prompt="""first prompt""" , image=snake_case_ , text_to_image_strength=0.75 , generator=snake_case_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
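        # Round-trip the pipeline through save_pretrained/from_pretrained, rerun with the
        # same seed, and require the summed absolute difference of outputs to stay below 1e-5.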
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case_ )
_lowerCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained(snake_case_ , torch_dtype=torch.floataa )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : int = generator.manual_seed(0 )
_lowerCAmelCase : Tuple = pipe.dual_guided(
prompt="""first prompt""" , image=snake_case_ , text_to_image_strength=0.75 , generator=snake_case_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : Optional[int] = """cyberpunk 2077"""
_lowerCAmelCase : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_lowerCAmelCase : Any = torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = pipe.dual_guided(
prompt=snake_case_ , image=snake_case_ , text_to_image_strength=0.75 , generator=snake_case_ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="""numpy""" , ).images
_lowerCAmelCase : Optional[Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCAmelCase : Tuple = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_lowerCAmelCase : Dict = """A painting of a squirrel eating a burger """
_lowerCAmelCase : Tuple = torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = pipe.text_to_image(
prompt=snake_case_ , generator=snake_case_ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="""numpy""" ).images
_lowerCAmelCase : int = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCAmelCase : Union[str, Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_lowerCAmelCase : List[str] = pipe.image_variation(snake_case_ , generator=snake_case_ , output_type="""numpy""" ).images
_lowerCAmelCase : str = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCAmelCase : List[Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 309
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
UpperCamelCase_ = 0
UpperCamelCase_ = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s mark free cells whereas 1s mark obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCamelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
UpperCamelCase_ = tuple[int, int]
class a_ :
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_lowerCAmelCase : Optional[int] = pos_x
_lowerCAmelCase : List[str] = pos_y
_lowerCAmelCase : Tuple = (pos_y, pos_x)
_lowerCAmelCase : List[Any] = goal_x
_lowerCAmelCase : int = goal_y
_lowerCAmelCase : Union[str, Any] = g_cost
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : List[Any] = self.calculate_heuristic()
_lowerCAmelCase : Optional[int] = self.g_cost + self.h_cost
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.pos_x - self.goal_x
_lowerCAmelCase : Optional[int] = self.pos_y - self.goal_y
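        # HEURISTIC == 1 selects Manhattan distance (|dx| + |dy|); otherwise Euclidean.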
if HEURISTIC == 1:
return abs(snake_case_ ) + abs(snake_case_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , snake_case_ ):
return self.f_cost < other.f_cost
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , snake_case_ )
_lowerCAmelCase : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , snake_case_ )
_lowerCAmelCase : List[str] = [self.start]
_lowerCAmelCase : list[Node] = []
_lowerCAmelCase : List[str] = False
def __UpperCamelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(snake_case_ )
self.closed_nodes.append(snake_case_ )
_lowerCAmelCase : Optional[int] = self.get_successors(snake_case_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(snake_case_ )
else:
# retrieve the best current path
_lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(snake_case_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(snake_case_ )
else:
self.open_nodes.append(snake_case_ )
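        # No path was found: fall back to a degenerate path holding only the start position.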
return [self.start.pos]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Union[str, Any] = []
for action in delta:
_lowerCAmelCase : Union[str, Any] = parent.pos_x + action[1]
_lowerCAmelCase : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(snake_case_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
snake_case_ , snake_case_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , snake_case_ , ) )
return successors
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[Any] = node
_lowerCAmelCase : Optional[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCAmelCase : Optional[int] = current_node.parent
path.reverse()
return path
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = AStar(snake_case_ , snake_case_ )
_lowerCAmelCase : int = AStar(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = False
def __UpperCamelCase ( self ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
_lowerCAmelCase : Tuple = self.fwd_astar.open_nodes.pop(0 )
_lowerCAmelCase : Optional[Any] = self.bwd_astar.open_nodes.pop(0 )
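            # If both frontiers pop the same cell, the forward and backward searches have met.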
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
snake_case_ , snake_case_ )
self.fwd_astar.closed_nodes.append(snake_case_ )
self.bwd_astar.closed_nodes.append(snake_case_ )
_lowerCAmelCase : List[str] = current_bwd_node
_lowerCAmelCase : Dict = current_fwd_node
_lowerCAmelCase : Any = {
self.fwd_astar: self.fwd_astar.get_successors(snake_case_ ),
self.bwd_astar: self.bwd_astar.get_successors(snake_case_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(snake_case_ )
else:
# retrieve the best current path
_lowerCAmelCase : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(snake_case_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(snake_case_ )
else:
astar.open_nodes.append(snake_case_ )
return [self.fwd_astar.start.pos]
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : int = self.fwd_astar.retrace_path(snake_case_ )
_lowerCAmelCase : Optional[Any] = self.bwd_astar.retrace_path(snake_case_ )
bwd_path.pop()
bwd_path.reverse()
_lowerCAmelCase : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
UpperCamelCase_ = (0, 0)
UpperCamelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCamelCase_ = time.time()
UpperCamelCase_ = AStar(init, goal)
UpperCamelCase_ = a_star.search()
UpperCamelCase_ = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
UpperCamelCase_ = time.time()
    UpperCamelCase_ = BidirectionalAStar(init, goal)
    # The bidirectional search was never run in the original, so the timing below measured
    # only construction; "bd_a_star" is an assumed name for the instance created above.
    UpperCamelCase_ = bd_a_star.search()
    UpperCamelCase_ = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 309
| 1
|
'''simple docstring'''
from manim import *
class a_ (_a ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : List[str] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : List[Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_lowerCAmelCase : Optional[int] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_lowerCAmelCase : List[Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
_lowerCAmelCase : Tuple = Text("""CPU""" , font_size=2_4 )
_lowerCAmelCase : List[str] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
_lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(1 )]
_lowerCAmelCase : Optional[Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_lowerCAmelCase : List[str] = Text("""GPU""" , font_size=2_4 )
_lowerCAmelCase : Union[str, Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
_lowerCAmelCase : str = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_lowerCAmelCase : List[Any] = Text("""Model""" , font_size=2_4 )
_lowerCAmelCase : Dict = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
_lowerCAmelCase : Any = MarkupText(
f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=2_4 , )
_lowerCAmelCase : Tuple = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase : Optional[Any] = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Union[str, Any] = []
for i, rect in enumerate(snake_case_ ):
_lowerCAmelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
_lowerCAmelCase : Dict = 0.46 / 4
_lowerCAmelCase : Union[str, Any] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
| 309
|
'''simple docstring'''
def _UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : str ) -> list[int]:
_lowerCAmelCase : List[Any] = int(_lowerCamelCase )
# Initialize Result
_lowerCAmelCase : Any = []
    # Traverse the denominations from largest to smallest (greedy choice)
for denomination in reversed(_lowerCamelCase ):
        # Take as many units of this denomination as still fit into the remaining value
while int(_lowerCamelCase ) >= int(_lowerCamelCase ):
total_value -= int(_lowerCamelCase )
            answer.append(_lowerCamelCase )  # Record the denomination that was used
return answer
# Driver Code
if __name__ == "__main__":
UpperCamelCase_ = []
UpperCamelCase_ = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
UpperCamelCase_ = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
UpperCamelCase_ = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCamelCase_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCamelCase_ = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'Following is minimal change for {value}: ')
UpperCamelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
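# Hedged example of the greedy routine above ("find_minimum_change" per the call in main):
# find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]
# Note: greedy selection is optimal for canonical systems like INR but not for arbitrary sets.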
| 309
| 1
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = inspect.getfile(accelerate.test_utils )
_lowerCAmelCase : Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
_lowerCAmelCase : Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] )
_lowerCAmelCase : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] )
@require_multi_gpu
def __UpperCamelCase ( self ):
print(f'Found {torch.cuda.device_count()} devices.' )
_lowerCAmelCase : Tuple = ["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
@require_multi_gpu
def __UpperCamelCase ( self ):
print(f'Found {torch.cuda.device_count()} devices.' )
_lowerCAmelCase : str = ["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
print(f'Command: {cmd}' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
@require_multi_gpu
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = ["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
@require_multi_gpu
def __UpperCamelCase ( self ):
print(f'Found {torch.cuda.device_count()} devices, using 2 devices only' )
_lowerCAmelCase : Any = ["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1""" ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
if __name__ == "__main__":
UpperCamelCase_ = Accelerator()
UpperCamelCase_ = (accelerator.state.process_index + 2, 10)
UpperCamelCase_ = torch.randint(0, 10, shape).to(accelerator.device)
UpperCamelCase_ = """"""
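    # pad_across_processes zero-pads dim 0 on every rank so all processes end up with the
    # largest size across ranks (num_processes + 1 rows here, given shape process_index + 2).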
UpperCamelCase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
UpperCamelCase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
UpperCamelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
'''simple docstring'''
import pprint
import requests
UpperCamelCase_ = """https://zenquotes.io/api"""
def _UpperCAmelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def _UpperCAmelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
UpperCamelCase_ = random_quotes()
pprint.pprint(response)
| 309
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = """laion/clap-htsat-unfused"""
_lowerCAmelCase : int = tempfile.mkdtemp()
def __UpperCamelCase ( self , **snake_case_ ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self , **snake_case_ ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = self.get_feature_extractor()
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : int = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
_lowerCAmelCase : Dict = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = floats_list((3, 1_0_0_0) )
_lowerCAmelCase : List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
_lowerCAmelCase : Optional[Any] = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Tuple = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = """This is a test string"""
_lowerCAmelCase : Union[str, Any] = processor(text=snake_case_ )
_lowerCAmelCase : Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = self.get_feature_extractor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[Any] = processor.batch_decode(snake_case_ )
_lowerCAmelCase : Dict = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.get_feature_extractor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 309
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
UpperCamelCase_ = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
UpperCamelCase_ = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class a_ (_a ):
__lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = ["""input_ids""", """attention_mask"""]
__lowerCAmelCase : List[int] = []
__lowerCAmelCase : List[int] = []
def __init__( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<pad>" , snake_case_="<unk>" , snake_case_="m2m100" , snake_case_ = None , snake_case_=8 , **snake_case_ , ):
_lowerCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase : Optional[Any] = language_codes
_lowerCAmelCase : Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes]
_lowerCAmelCase : str = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
_lowerCAmelCase : int = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(snake_case_ )
for lang_code in fairseq_language_code
if self.get_lang_token(snake_case_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case_ , tgt_lang=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , language_codes=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Optional[int] = vocab_file
_lowerCAmelCase : Any = load_json(snake_case_ )
_lowerCAmelCase : str = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Union[str, Any] = spm_file
_lowerCAmelCase : Tuple = load_spm(snake_case_ , self.sp_model_kwargs )
_lowerCAmelCase : int = len(self.encoder )
_lowerCAmelCase : Union[str, Any] = {
self.get_lang_token(snake_case_ ): self.encoder_size + i for i, lang_code in enumerate(snake_case_ )
}
_lowerCAmelCase : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case_ )}
_lowerCAmelCase : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()}
_lowerCAmelCase : Any = src_lang if src_lang is not None else """en"""
_lowerCAmelCase : Optional[int] = tgt_lang
_lowerCAmelCase : Tuple = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_lowerCAmelCase : List[Any] = num_madeup_words
@property
def __UpperCamelCase ( self ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __UpperCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case_ , self.encoder[self.unk_token] )
def __UpperCamelCase ( self , snake_case_ ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case_ , self.unk_token )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_ ) + token
_lowerCAmelCase : Optional[Any] = []
else:
current_sub_tokens.append(snake_case_ )
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
_lowerCAmelCase : List[Any] = [1] * len(self.prefix_tokens )
_lowerCAmelCase : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCAmelCase : int = self.__dict__.copy()
_lowerCAmelCase : str = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : str = {}
_lowerCAmelCase : str = load_spm(self.spm_file , self.sp_model_kwargs )
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Dict = Path(snake_case_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , snake_case_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case_ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (str(snake_case_ ), str(snake_case_ ))
def __UpperCamelCase ( self , snake_case_ , snake_case_ = "en" , snake_case_ = None , snake_case_ = "ro" , **snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = src_lang
_lowerCAmelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase : Dict = src_lang
_lowerCAmelCase : str = self(snake_case_ , add_special_tokens=snake_case_ , **snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.get_lang_id(snake_case_ )
_lowerCAmelCase : Tuple = tgt_lang_id
return inputs
def __UpperCamelCase ( self ):
self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[Any] = self.get_lang_token(snake_case_ )
_lowerCAmelCase : List[Any] = self.lang_token_to_id[lang_token]
_lowerCAmelCase : Any = [self.cur_lang_id]
_lowerCAmelCase : Any = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = self.get_lang_token(snake_case_ )
_lowerCAmelCase : int = self.lang_token_to_id[lang_token]
_lowerCAmelCase : str = [self.cur_lang_id]
_lowerCAmelCase : str = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
return self.lang_code_to_token[lang]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[str] = self.get_lang_token(snake_case_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
_lowerCAmelCase : Optional[Any] = sentencepiece.SentencePieceProcessor(**_lowerCamelCase )
spm.Load(str(_lowerCamelCase ) )
return spm
def _UpperCAmelCase ( _lowerCamelCase : str ) -> Union[Dict, List]:
with open(_lowerCamelCase , """r""" ) as f:
return json.load(_lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : str ) -> None:
with open(_lowerCamelCase , """w""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase , indent=2 )
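# Hedged usage sketch of the tokenizer above (this file is the M2M100 tokenizer; the class
# name "M2M100Tokenizer" and the checkpoint id come from the public HF hub, not this file):
# tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
# batch = tok("Hello world", return_tensors="pt")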
| 309
| 1
|
'''simple docstring'''
def _UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : list , _lowerCamelCase : int ) -> list:
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = [[0] * n for i in range(_lowerCamelCase )]
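    # Neville's scheme: q[j][i] holds the interpolant through points j-i+1..j evaluated at
    # the query point; a base column is seeded with the raw y-values in the loop below.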
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = y_points[i]
for i in range(2 , _lowerCamelCase ):
for j in range(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Any = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
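# Hedged example (the function implements Neville's interpolation; "neville_interpolate" is
# an assumed name, since the obfuscated one is not meaningful):
# neville_interpolate([1, 2, 3, 4], [6, 7, 8, 9], 5)[0] -> 10.0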
| 309
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _UpperCAmelCase ( _lowerCamelCase : Callable , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> np.ndarray:
_lowerCAmelCase : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) )
_lowerCAmelCase : Tuple = np.zeros((n + 1,) )
_lowerCAmelCase : List[Any] = ya
_lowerCAmelCase : int = xa
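    # Forward (explicit) Euler: y_{k+1} = y_k + h * f(x_k, y_k), advancing x by h each step.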
for k in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = y[k] + step_size * ode_func(_lowerCamelCase , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
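# Hedged example ("explicit_euler" is an assumed name; the argument order is assumed to be
# ode_func, y0, x0, step_size, x_end as in the classic reference implementation):
# explicit_euler(lambda x, y: y, 1.0, 0.0, 0.001, 0.01)[-1] -> ~1.0100 (exact e**0.01 ~ 1.01005)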
| 309
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
|
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] ) -> Union[str, Any]:
# ===== initialization =====
_lowerCAmelCase : Tuple = Mock()
_lowerCAmelCase : Any = conn, Mock()
_lowerCAmelCase : Optional[Any] = iter([1, None] )
_lowerCAmelCase : str = lambda _lowerCamelCase : next(_lowerCamelCase )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=_lowerCamelCase )
    # ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 309
| 1
|
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
UpperCamelCase_ = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
UpperCamelCase_ = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
UpperCamelCase_ = {
"""ernie-m-base""": 5_14,
"""ernie-m-large""": 5_14,
}
UpperCamelCase_ = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class a_ (_a ):
__lowerCAmelCase : List[str] = ["input_ids"]
__lowerCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : int = RESOURCE_FILES_NAMES
def __init__( self , snake_case_ , snake_case_=None , snake_case_=False , snake_case_="utf8" , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_ = None , **snake_case_ , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is kept in the raw text, so there should be a match in a non-normalized sentence.
_lowerCAmelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , vocab_file=snake_case_ , encoding=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
_lowerCAmelCase : Optional[Any] = do_lower_case
_lowerCAmelCase : Any = sentencepiece_model_ckpt
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
_lowerCAmelCase : int = self.load_vocab(filepath=snake_case_ )
else:
_lowerCAmelCase : str = {self.sp_model.id_to_piece(snake_case_ ): id for id in range(self.sp_model.get_piece_size() )}
_lowerCAmelCase : str = {v: k for k, v in self.vocab.items()}
def __UpperCamelCase ( self , snake_case_ ):
if text is None:
return None
_lowerCAmelCase : List[str] = self.tokenize(snake_case_ )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = """""", []
for i, ch in enumerate(snake_case_ ):
if ch in self.SP_CHAR_MAPPING:
_lowerCAmelCase : Union[str, Any] = self.SP_CHAR_MAPPING.get(snake_case_ )
else:
_lowerCAmelCase : Union[str, Any] = unicodedata.normalize("""NFKC""" , snake_case_ )
if self.is_whitespace(snake_case_ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(snake_case_ ) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = normalized_text, [], 0
if self.do_lower_case:
_lowerCAmelCase : Optional[int] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
_lowerCAmelCase : List[str] = token[1:]
_lowerCAmelCase : Tuple = text[offset:].index(snake_case_ ) + offset
_lowerCAmelCase : Optional[int] = start + len(snake_case_ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
_lowerCAmelCase : Dict = end
return token_mapping
@property
def __UpperCamelCase ( self ):
return len(self.vocab )
def __UpperCamelCase ( self ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
_lowerCAmelCase : Any = self.__dict__.copy()
_lowerCAmelCase : Optional[int] = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : Optional[int] = {}
_lowerCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def __UpperCamelCase ( self , snake_case_ ):
return "".join((self.SP_CHAR_MAPPING.get(snake_case_ , snake_case_ ) for c in text) )
def __UpperCamelCase ( self , snake_case_ , snake_case_=False , snake_case_=6_4 , snake_case_=0.1 ):
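        # Optional subword regularization: when sampling is enabled, draw a segmentation via
        # SentencePiece SampleEncodeAsPieces instead of taking the single best split.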
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
_lowerCAmelCase : Dict = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
_lowerCAmelCase : str = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
_lowerCAmelCase : List[Any] = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
_lowerCAmelCase : str = self.sp_model.EncodeAsPieces(snake_case_ )
else:
_lowerCAmelCase : Union[str, Any] = self.sp_model.SampleEncodeAsPieces(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = []
for pi, piece in enumerate(snake_case_ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(snake_case_ ) and pi != 0:
new_pieces.append(snake_case_ )
continue
else:
continue
_lowerCAmelCase : int = 0
for i, chunk in enumerate(snake_case_ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(snake_case_ ) or self.is_punct(snake_case_ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(snake_case_ )
_lowerCAmelCase : Optional[int] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
_lowerCAmelCase : List[Any] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
_lowerCAmelCase : Any = i
if len(snake_case_ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = """""".join(snake_case_ ).replace(snake_case_ , """ """ ).strip()
return out_string
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Tuple = self.convert_ids_to_tokens(snake_case_ )
_lowerCAmelCase : Dict = """""".join(snake_case_ ).replace(snake_case_ , """ """ ).strip()
return out_string
def __UpperCamelCase ( self , snake_case_ ):
return self.vocab.get(snake_case_ , self.vocab.get(self.unk_token ) )
def __UpperCamelCase ( self , snake_case_ ):
return self.reverse_vocab.get(snake_case_ , self.unk_token )
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
_lowerCAmelCase : Tuple = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def __UpperCamelCase ( self , snake_case_ , snake_case_=None , snake_case_=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1]
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(snake_case_ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(snake_case_ ) + 1) + [1] * (len(snake_case_ ) + 3)
def __UpperCamelCase ( self , snake_case_ ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def __UpperCamelCase ( self , snake_case_ ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def __UpperCamelCase ( self , snake_case_ ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def __UpperCamelCase ( self , snake_case_ ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(snake_case_ ) == 1:
_lowerCAmelCase : str = unicodedata.category(snake_case_ )
if cat == "Zs":
return True
return False
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[Any] = {}
with io.open(snake_case_ , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(snake_case_ ):
_lowerCAmelCase : List[str] = line.rstrip("""\n""" )
_lowerCAmelCase : List[str] = int(snake_case_ )
return token_to_idx
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : str = 0
if os.path.isdir(snake_case_ ):
_lowerCAmelCase : str = os.path.join(
snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
_lowerCAmelCase : Dict = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(snake_case_ , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda snake_case_ : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
""" Please check that the vocabulary is not corrupted!""" )
_lowerCAmelCase : Union[str, Any] = token_index
writer.write(token + """\n""" )
index += 1
_lowerCAmelCase : Optional[Any] = os.path.join(snake_case_ , """sentencepiece.bpe.model""" )
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (vocab_file,)
| 309
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_0 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1_0 , snake_case_=0.02 , snake_case_=None , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : Dict = num_patches + 1
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[Any] = ViTMSNModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Tuple = self.type_sequence_label_size
_lowerCAmelCase : int = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[int] = model(snake_case_ , labels=snake_case_ )
        print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
        print(f'Labels: {labels}' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : int = 1
_lowerCAmelCase : List[str] = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ (_a , _a , unittest.TestCase ):
__lowerCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__lowerCAmelCase : Optional[int] = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Any = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = ViTMSNModelTester(self )
_lowerCAmelCase : int = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(snake_case_ )
_lowerCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
    def test_model_from_pretrained( self ):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class a_ (unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
        return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        torch.manual_seed(2 )
        model = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 309
| 1
|
'''simple docstring'''
from math import log2
def _UpperCAmelCase ( _lowerCamelCase : int ) -> int:
    if not isinstance(_lowerCamelCase , int ):
        raise TypeError("""Input value must be a 'int' type""" )
    if _lowerCamelCase < 0:
        raise ValueError("""Input value must be a positive integer""" )
    return 0 if (_lowerCamelCase == 0) else int(log2(_lowerCamelCase & -_lowerCamelCase ) )
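# Example: 36 = 0b100100, so 36 & -36 isolates the lowest set bit (4 = 0b100),
# and log2(4) gives its zero-based index, 2.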
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class a_ (_a ):
__lowerCAmelCase : List[Any] = """microsoft/speecht5_tts"""
__lowerCAmelCase : List[Any] = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
__lowerCAmelCase : List[str] = """text_reader"""
__lowerCAmelCase : Optional[Any] = SpeechTaProcessor
__lowerCAmelCase : str = SpeechTaForTextToSpeech
__lowerCAmelCase : int = SpeechTaHifiGan
__lowerCAmelCase : int = ["""text"""]
__lowerCAmelCase : int = ["""audio"""]
    def setup( self ):
        if self.post_processor is None:
            self.post_processor = """microsoft/speecht5_hifigan"""
        super().setup()
    def encode( self , text , speaker_embeddings=None ):
        inputs = self.pre_processor(text=text , return_tensors="""pt""" , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
            embeddings_dataset = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
            speaker_embeddings = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward( self , inputs ):
        with torch.no_grad():
            return self.model.generate_speech(**inputs )
    def decode( self , outputs ):
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
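# A minimal usage sketch (hypothetical driver code; assumes the checkpoints named above
# are reachable and that `datasets` is installed for the default speaker x-vector):
#   tool = a_()
#   tool.setup()                 # resolves the default HiFi-GAN post-processor
#   audio = tool("""Hello!""" )  # PipelineTool.__call__ chains encode -> forward -> decode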
| 309
| 1
|
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ (_a ):
    def __init__( self , value_function , unet , scheduler , env ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except: # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except: # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize( self , x_in , key ):
        return (x_in - self.means[key]) / self.stds[key]
    def de_normalize( self , x_in , key ):
        return x_in * self.stds[key] + self.means[key]
    def to_torch( self , x_in ):
        if type(x_in ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
        elif torch.is_tensor(x_in ):
            return x_in.to(self.unet.device )
        return torch.tensor(x_in , device=self.unet.device )
    def reset_xa( self , x_in , cond , act_dim ):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion( self , x , conditions , n_guide_steps , scale ):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,) , i , device=self.unet.device , dtype=torch.long )
            for _ in range(n_guide_steps ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0 , 2 , 1 ) , timesteps ).sample
                    grad = torch.autograd.grad([y.sum()] , [x] )[0]
                    # scale the value gradient by the scheduler's posterior std so guidance tracks the noise schedule
                    posterior_variance = self.scheduler._get_variance(i )
                    model_std = torch.exp(0.5 * posterior_variance )
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_xa(x , conditions , self.action_dim )
            prev_x = self.unet(x.permute(0 , 2 , 1 ) , timesteps ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x , i , x , predict_epsilon=False )["""prev_sample"""]
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_xa(x , conditions , self.action_dim )
            x = self.to_torch(x )
        return x, y
    def __call__( self , obs , batch_size=64 , planning_horizon=32 , n_guide_steps=2 , scale=0.1 ):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs , """observations""" )
        obs = obs[None].repeat(batch_size , axis=0 )
        conditions = {0: self.to_torch(obs )}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        xa = randn_tensor(shape , device=self.unet.device )
        x = self.reset_xa(xa , conditions , self.action_dim )
        x = self.to_torch(x )
        # run the diffusion process
        x , y = self.run_diffusion(x , conditions , n_guide_steps , scale )
        # sort output trajectories by value
        sorted_idx = y.argsort(0 , descending=True ).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions , key="""actions""" )
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0 , batch_size )
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
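# A minimal usage sketch (hypothetical driver code; assumes a D4RL-style `env` plus
# pretrained value-function and diffusion UNet checkpoints matching the layout above):
#   pipeline = a_(value_function , unet , scheduler , env )
#   obs = env.reset()
#   action = pipeline(obs , batch_size=64 )  # plans trajectories, returns the best first action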
| 309
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return image
def create_rename_keys(config ):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
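# Example pair produced above for layer 0: ("visual_encoder.blocks.0.norm1.weight",
# "vision_model.encoder.layers.0.layer_norm1.weight").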
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config ):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
        v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
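# Note: the original checkpoint stores no k bias (it is fixed at zero in the vision
# encoder), which is why the fused qkv bias is assembled as [q_bias, zeros, v_bias].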
def get_blipa_config(model_name , eos_token_id=None ):
    image_size = 364 if """coco""" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    tokenizer = (
        AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
        if """opt""" in model_name
        else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
    )
    eos_token_id = tokenizer("""\n""" , add_special_tokens=False ).input_ids[0]
    config , image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
        """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
        """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
        """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
        """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
        """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
        """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
        """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
    }
    name , type = model_name_to_original[model_name]
    # load original model
    print("""Loading original model...""" )
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    original_model , vis_processors , _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
    original_model.eval()
    print("""Done!""" )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("""Qformer.bert""" ):
            key = key.replace("""Qformer.bert""" , """qformer""" )
        if "attention.self" in key:
            key = key.replace("""self""" , """attention""" )
        if "opt_proj" in key:
            key = key.replace("""opt_proj""" , """language_projection""" )
        if "t5_proj" in key:
            key = key.replace("""t5_proj""" , """language_projection""" )
        if key.startswith("""opt""" ):
            key = key.replace("""opt""" , """language""" )
        if key.startswith("""t5""" ):
            key = key.replace("""t5""" , """language""" )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["""eval"""](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={"""height""": image_size, """width""": image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors="""pt""" ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values , pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
    assert original_logits.shape == logits.shape
    print("""First values of original logits:""" , original_logits[0, :3, :3] )
    print("""First values of HF logits:""" , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1e-2 )
    print("""Looks ok!""" )
    print("""Generating a caption...""" )
    prompt = """"""
    input_ids = tokenizer(prompt , return_tensors="""pt""" ).input_ids.to(device )
    original_outputs = original_model.generate({"""image""": original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print("""Original generation:""" , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print("""HF generation:""" , output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(f'nielsr/{model_name}' )
        hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 309
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class a_ (_a ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DonutImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 309
|
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase_ = """src/diffusers"""
# Pattern that looks at the indentation in a line.
UpperCamelCase_ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase_ = re.compile(r"""\[([^\]]+)\]""")
def get_indent(line ):
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code , indent_level="" , start_prompt=None , end_prompt=None ):
    index = 0
    lines = code.split("""\n""" )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ["""\n""".join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
                current_block.append(lines[index] )
                blocks.append("""\n""".join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("""\n""".join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append("""\n""".join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append("""\n""".join(lines[index:] ) )
    return blocks
def ignore_underscore(key ):
    def _inner(x ):
        return key(x ).lower().replace("""_""" , """""" )
    return _inner
def sort_objects(objects , key=None ):
    # If no key is provided, we use a noop.
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
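# Intended behavior, e.g.: sort_objects(["foo", "CONSTANT", "Bar"]) returns
# ["CONSTANT", "Bar", "foo"] -- constants first, then classes, then functions.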
def sort_objects_in_import(import_statement ):
    # This inner function sort imports between [ ].
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split("""\n""" )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == """[""" else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x: x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + """, """.join([f'"{k}"' for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
def sort_imports(file , check_only=True ):
    with open(file , """r""" ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("""\n""" )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = """\n""".join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x: x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.' )
            with open(file , """w""" ) as f:
                f.write("""\n""".join(main_blocks ) )
def sort_imports_in_all_inits(check_only=True ):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , """__init__.py""" ) , check_only=check_only )
            if result:
                failures += [os.path.join(root , """__init__.py""" )]
    if len(failures ) > 0:
        raise ValueError(f'Would overwrite {len(failures )} files, run `make style`.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 309
| 1
|
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class a_ :
    def __init__( self , parent , batch_size=1_3 , image_size=3_2 , patch_size=2 , num_channels=3 , embed_dim=1_6 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=1_0 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
        # verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ["""stem"""]
            model = MaskFormerSwinBackbone(config=config )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class a_ (_a , _a , unittest.TestCase ):
__lowerCAmelCase : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__lowerCAmelCase : Tuple = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
__lowerCAmelCase : Dict = False
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : str = False
__lowerCAmelCase : int = False
__lowerCAmelCase : List[Any] = False
    def setUp( self ):
        self.model_tester = MaskFormerSwinModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self ):
return
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*snake_case_ )
@unittest.skip("""Swin does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(snake_case_ )
_lowerCAmelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : List[str] = [*signature.parameters.keys()]
_lowerCAmelCase : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def __UpperCamelCase ( self ):
pass
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def __UpperCamelCase ( self ):
pass
    def test_model_outputs_equivalence( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t ):
            # NaN != NaN, so this mask zeroes out exactly the NaN entries
            t[t != t] = 0
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
                def recursive_check(tuple_object , dict_object ):
                    if isinstance(tuple_object , (List, Tuple) ):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif isinstance(tuple_object , Dict ):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values() , dict_object.values() ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1E-5 ) , msg=(
                                """Tuple and dict output are not equal. Difference:"""
                                f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
                                f' {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object )}. Dict has'
                                f' `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object )}.'
                            ) , )
                recursive_check(tuple_output , dict_output )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {"""output_hidden_states""": True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {"""output_hidden_states""": True} )
@require_torch
class a_ (unittest.TestCase , _a ):
__lowerCAmelCase : List[Any] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__lowerCAmelCase : Any = MaskFormerSwinConfig
    def setUp( self ):
        self.model_tester = MaskFormerSwinModelTester(self )
    def test_backbone_outputs( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["""pixel_values"""].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size , _ , h_n_channels = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True )
                self.assertIsNotNone(outputs.attentions )
| 309
|
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCamelCase_ = logging.get_logger(__name__)
class a_ :
    def __init__( self , question_encoder , generator ):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained( self , save_directory ):
        if os.path.isfile(save_directory ):
            raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , """question_encoder_tokenizer""" )
        generator_path = os.path.join(save_directory , """generator_tokenizer""" )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer
        config = kwargs.pop("""config""" , None )
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path )
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder="""generator_tokenizer""" )
        return cls(question_encoder=question_encoder , generator=generator )
def __call__( self , *snake_case_ , **snake_case_ ):
return self.current_tokenizer(*snake_case_ , **snake_case_ )
    def batch_decode( self , *args , **kwargs ):
        return self.generator.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.generator.decode(*args , **kwargs )
    def _switch_to_input_mode( self ):
        self.current_tokenizer = self.question_encoder
    def _switch_to_target_mode( self ):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch( self , src_texts , tgt_texts = None , max_length = None , max_target_length = None , padding = "longest" , return_tensors = None , truncation = True , **kwargs , ) -> BatchEncoding:
        warnings.warn(
            """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
            """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
            """context manager to prepare your targets. See the documentation of your specific tokenizer for more """
            """details""" , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs["""labels"""] = labels["""input_ids"""]
        return model_inputs
| 309
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class a_ (_a ):
__lowerCAmelCase : List[Any] = """microsoft/speecht5_tts"""
__lowerCAmelCase : List[Any] = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
__lowerCAmelCase : List[str] = """text_reader"""
__lowerCAmelCase : Optional[Any] = SpeechTaProcessor
__lowerCAmelCase : str = SpeechTaForTextToSpeech
__lowerCAmelCase : int = SpeechTaHifiGan
__lowerCAmelCase : int = ["""text"""]
__lowerCAmelCase : int = ["""audio"""]
    def setup( self ):
        if self.post_processor is None:
            self.post_processor = """microsoft/speecht5_hifigan"""
        super().setup()
    def encode( self , text , speaker_embeddings=None ):
        inputs = self.pre_processor(text=text , return_tensors="""pt""" , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
            embeddings_dataset = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
            speaker_embeddings = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward( self , inputs ):
        with torch.no_grad():
            return self.model.generate_speech(**inputs )
    def decode( self , outputs ):
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
| 309
|
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a : list ) -> None:
    min_val = min(a )  # min() finds the minimum value
    max_val = max(a )  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x , int ), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size ):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
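# Worked example: for [8, 3, 2, 7, 4, 6, 8], min_val = 2 and size = 7, so holes
# counts occurrences of the values 2..8 as [1, 1, 1, 0, 1, 1, 2], which unrolls
# back into the sorted list [2, 3, 4, 6, 7, 8, 8].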
def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a )
    print("""Sorted order is:""" , """ """.join(str(x ) for x in a ) )
if __name__ == "__main__":
main()
| 309
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_clipseg"""] = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
|
'''simple docstring'''
def solution(n : int = 1_00 ) -> int:
    # square of the sum: (n(n + 1)/2)^2, which also equals the sum of the first n cubes
    sum_cubes = (n * (n + 1) // 2) ** 2
    # sum of the squares: n(n + 1)(2n + 1)/6
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
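# Worked example for n = 10: the square of the sum is (10 * 11 // 2) ** 2 = 3025,
# the sum of squares is 10 * 11 * 21 // 6 = 385, and the difference is 2640.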
if __name__ == "__main__":
print(F'{solution() = }')
| 309
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_x_clip"] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
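# Sanity check: C(10, 5) == 252, so the line above prints 252. The rolling one-dimensional
# table keeps only the previous Pascal row, giving O(r) space instead of O(n * r).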
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCamelCase_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""DPTFeatureExtractor"""]
UpperCamelCase_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_dpt"] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias')
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias


def get_blipa_config(model_name, eos_token_id=None):
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)
    hf_model = BlipaForConditionalGeneration(config).eval()
    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    name, type = model_name_to_original[model_name]
    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits
    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")
    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f'nielsr/{model_name}')
        hf_model.push_to_hub(f'nielsr/{model_name}')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
choices = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
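# Example invocation (a sketch; the script filename is hypothetical and the LAVIS checkpoint
# must be downloadable in the current environment):
# python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b --pytorch_dump_folder_path ./blip2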
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
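# relu is applied elementwise by np.maximum, so it works unchanged on multi-dimensional arrays:
# print(relu(np.array([[-2.0, 3.0], [0.5, -0.1]])))  # --> [[0.  3. ] [0.5 0. ]]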
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase_ = """true"""
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
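# Note: gather_for_metrics (used above) drops the samples that Accelerate duplicates to pad
# the last batch across processes, which is why the distributed metrics match the baseline.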
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
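# Migration note: since this class only emits the warning and then defers entirely to its
# parent, DonutImageProcessor.from_pretrained(...) is a drop-in replacement.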
'''simple docstring'''
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Return True if there is an augmenting path from source s to sink t.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
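# By the max-flow min-cut theorem, the saturated edges collected above form a cut whose total
# capacity equals the maximum flow; for this classic CLRS example graph that value is 23.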
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47_8210_4492_1875) < 1E-2
            assert abs(result_mean.item() - 0.2178_7059_6456_5277) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3521_1181_6406) < 1E-2
            assert abs(result_mean.item() - 0.2_2342_9068_9229_9652) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1E-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1E-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77_1492_0043_9453) < 1E-2
            assert abs(result_mean.item() - 0.1_6226_2890_1481_6284) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1_6633_6059_5703) < 1E-2
            assert abs(result_mean.item() - 0.1_6688_3260_0116_7297) < 1E-3
        else:
            assert abs(result_sum.item() - 119.8_4875_4882_8125) < 1E-2
            assert abs(result_mean.item() - 0.1560_5306_6253_6621) < 1E-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46_9573_9746_0938) < 1E-2
            assert abs(result_mean.item() - 0.2_1805_9346_0798_2635) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3536_3769_5312) < 1E-2
            assert abs(result_mean.item() - 0.2_2342_9083_8241_5771) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1E-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1E-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66_9741_3574_2188) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1E-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63_6535_6445_3125) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1E-2
        else:
            assert abs(result_sum.item() - 170.3_1352_2338_8672) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1E-2
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
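# A minimal usage sketch (assumes network access to the Hugging Face Hub):
# tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
# print(tok("Hello world")["input_ids"])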
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class DebertaVaConfig(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(self, vocab_size=128100, hidden_size=1536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6144, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1E-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaVaOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self):
        return 12

    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, num_choices=-1, is_pair=False, framework=None, num_channels=3, image_width=40, image_height=40, tokenizer=None):
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
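# A minimal sketch: the defaults above mirror microsoft/deberta-v2-xlarge, so
# DebertaVaConfig() with no arguments reproduces that architecture's shape parameters.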
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cva.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
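# Note: despite the class name, stretch() implements histogram equalization: each intensity is
# remapped through the cumulative distribution so values spread across the full [0, 255] range.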
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCamelCase_ = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale=True, rescale_factor=1 / 255, do_pad=True, pad_size=8, **kwargs):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image, size, data_format=None):
        old_height, old_width = get_image_size(image)
        # pad the bottom and right edges so each dimension becomes the next multiple of `size`
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images, do_rescale=None, rescale_factor=None, do_pad=None, pad_size=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
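# Example: with the default pad_size of 8, a 30x41 input is symmetric-padded on the bottom/right
# to 32x48; note the formula always pads by at least one pixel, so an exact multiple grows by a
# full `size`.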
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other):
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start, goal):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
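# On this small grid both searches find a valid path; bidirectional A* typically expands fewer
# nodes because its two frontiers meet near the middle of the grid.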
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
'''simple docstring'''
def _UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : str ) -> list[int]:
_lowerCAmelCase : List[Any] = int(_lowerCamelCase )
# Initialize Result
_lowerCAmelCase : Any = []
# Traverse through all denomination
for denomination in reversed(_lowerCamelCase ):
# Find denominations
while int(_lowerCamelCase ) >= int(_lowerCamelCase ):
total_value -= int(_lowerCamelCase )
answer.append(_lowerCamelCase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
UpperCamelCase_ = []
UpperCamelCase_ = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
UpperCamelCase_ = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
UpperCamelCase_ = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCamelCase_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCamelCase_ = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'Following is minimal change for {value}: ')
UpperCamelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
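# A minimal worked example, assuming the greedy routine above: with the
# default Indian denominations, find_minimum_change takes the largest note
# that still fits at every step, so a value of 987 yields
# [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]. Greedy change-making is
# optimal for canonical denomination systems like this one, but not for
# arbitrary denomination sets.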
'''simple docstring'''
from __future__ import annotations
import math
def _UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : list ) -> list:
if len(_lowerCamelCase ) != 2 or len(a[0] ) != 2 or len(_lowerCamelCase ) != 2 or len(b[0] ) != 2:
raise Exception("""Matrices are not 2x2""" )
_lowerCAmelCase : List[str] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def _UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : list ) -> Union[str, Any]:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_lowerCamelCase ) )
]
def _UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : list ) -> List[str]:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_lowerCamelCase ) )
]
def _UpperCAmelCase ( _lowerCamelCase : list ) -> tuple[list, list, list, list]:
if len(_lowerCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception("""Odd matrices are not supported!""" )
_lowerCAmelCase : Optional[Any] = len(_lowerCamelCase )
_lowerCAmelCase : Dict = matrix_length // 2
_lowerCAmelCase : Dict = [[a[i][j] for j in range(_lowerCamelCase , _lowerCamelCase )] for i in range(_lowerCamelCase )]
_lowerCAmelCase : Tuple = [
[a[i][j] for j in range(_lowerCamelCase , _lowerCamelCase )] for i in range(_lowerCamelCase , _lowerCamelCase )
]
_lowerCAmelCase : Union[str, Any] = [[a[i][j] for j in range(_lowerCamelCase )] for i in range(_lowerCamelCase )]
_lowerCAmelCase : Any = [[a[i][j] for j in range(_lowerCamelCase )] for i in range(_lowerCamelCase , _lowerCamelCase )]
return top_left, top_right, bot_left, bot_right
def _UpperCAmelCase ( _lowerCamelCase : list ) -> tuple[int, int]:
return len(_lowerCamelCase ), len(matrix[0] )
def _UpperCAmelCase ( _lowerCamelCase : list ) -> None:
print("""\n""".join(str(_lowerCamelCase ) for line in matrix ) )
def _UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : list ) -> list:
if matrix_dimensions(_lowerCamelCase ) == (2, 2):
return default_matrix_multiplication(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = split_matrix(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = split_matrix(_lowerCamelCase )
_lowerCAmelCase : Any = actual_strassen(_lowerCamelCase , matrix_subtraction(_lowerCamelCase , _lowerCamelCase ) )
_lowerCAmelCase : List[Any] = actual_strassen(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
_lowerCAmelCase : Tuple = actual_strassen(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
_lowerCAmelCase : str = actual_strassen(_lowerCamelCase , matrix_subtraction(_lowerCamelCase , _lowerCamelCase ) )
_lowerCAmelCase : Optional[Any] = actual_strassen(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , matrix_addition(_lowerCamelCase , _lowerCamelCase ) )
_lowerCAmelCase : Dict = actual_strassen(matrix_subtraction(_lowerCamelCase , _lowerCamelCase ) , matrix_addition(_lowerCamelCase , _lowerCamelCase ) )
_lowerCAmelCase : int = actual_strassen(matrix_subtraction(_lowerCamelCase , _lowerCamelCase ) , matrix_addition(_lowerCamelCase , _lowerCamelCase ) )
_lowerCAmelCase : Dict = matrix_addition(matrix_subtraction(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase ) , _lowerCamelCase )
_lowerCAmelCase : List[Any] = matrix_addition(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = matrix_addition(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Tuple = matrix_subtraction(matrix_subtraction(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase ) , _lowerCamelCase )
# construct the new matrix from our 4 quadrants
_lowerCAmelCase : List[Any] = []
for i in range(len(_lowerCamelCase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(_lowerCamelCase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def _UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : list ) -> list:
if matrix_dimensions(_lowerCamelCase )[1] != matrix_dimensions(_lowerCamelCase )[0]:
_lowerCAmelCase : Optional[Any] = (
"""Unable to multiply these matrices, please check the dimensions.\n"""
f'Matrix A: {matrixa}\n'
f'Matrix B: {matrixa}'
)
raise Exception(_lowerCamelCase )
_lowerCAmelCase : List[Any] = matrix_dimensions(_lowerCamelCase )
_lowerCAmelCase : Tuple = matrix_dimensions(_lowerCamelCase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
_lowerCAmelCase : Optional[int] = max(*_lowerCamelCase , *_lowerCamelCase )
_lowerCAmelCase : Tuple = int(math.pow(2 , math.ceil(math.loga(_lowerCamelCase ) ) ) )
_lowerCAmelCase : Any = matrixa
_lowerCAmelCase : int = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , _lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
_lowerCAmelCase : Optional[Any] = actual_strassen(_lowerCamelCase , _lowerCamelCase )
# Removing the additional zeros
for i in range(0 , _lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCamelCase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
UpperCamelCase_ = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
UpperCamelCase_ = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
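# A sketch of the padding step above for these operands: the first matrix is
# 10x4 and the second 4x4, so maxim = max(10, 4, 4, 4) = 10 and both are
# zero-padded to the next power of two, 16x16, before actual_strassen
# recurses; the padding rows and columns are popped off again so strassen
# returns the plain 10x4 product.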
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
UpperCamelCase_ = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
UpperCamelCase_ = None
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[str] = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=_lowerCamelCase , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=_lowerCamelCase , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def _UpperCAmelCase ( _lowerCamelCase : Tuple ) -> int:
_lowerCAmelCase : List[Any] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_lowerCAmelCase : Optional[Any] = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def _UpperCAmelCase ( _lowerCamelCase : Optional[int] ) -> Optional[Any]:
def remove_articles(_lowerCamelCase : List[Any] ):
return ARTICLES_REGEX.sub(""" """ , _lowerCamelCase )
def white_space_fix(_lowerCamelCase : List[str] ):
return " ".join(text.split() )
def remove_punc(_lowerCamelCase : Optional[int] ):
_lowerCAmelCase : Any = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowerCamelCase : int ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowerCamelCase ) ) ) )
def _UpperCAmelCase ( _lowerCamelCase : Any ) -> Union[str, Any]:
if not s:
return []
return normalize_answer(_lowerCamelCase ).split()
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : int ) -> List[Any]:
return int(normalize_answer(_lowerCamelCase ) == normalize_answer(_lowerCamelCase ) )
def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any] ) -> Optional[int]:
_lowerCAmelCase : int = get_tokens(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = get_tokens(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = collections.Counter(_lowerCamelCase ) & collections.Counter(_lowerCamelCase )
_lowerCAmelCase : List[Any] = sum(common.values() )
if len(_lowerCamelCase ) == 0 or len(_lowerCamelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
_lowerCAmelCase : List[Any] = 1.0 * num_same / len(_lowerCamelCase )
_lowerCAmelCase : Dict = 1.0 * num_same / len(_lowerCamelCase )
_lowerCAmelCase : List[str] = (2 * precision * recall) / (precision + recall)
return fa
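# A worked example of the token-overlap F1 above: a gold answer "the cat sat"
# normalizes to ["cat", "sat"] (the article is dropped) while a prediction
# "cat sat down" normalizes to ["cat", "sat", "down"]; the overlap is 2
# tokens, so precision = 2/3, recall = 2/2 = 1.0, and
# F1 = 2 * (2/3) * 1.0 / (2/3 + 1.0) = 0.8.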
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Optional[int] ) -> Dict:
_lowerCAmelCase : Optional[int] = {}
_lowerCAmelCase : Union[str, Any] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_lowerCAmelCase : List[Any] = qa["""id"""]
_lowerCAmelCase : List[str] = [t for t in qa["""answers"""]["""text"""] if normalize_answer(_lowerCamelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
_lowerCAmelCase : List[Any] = [""""""]
if qid not in preds:
print(f'Missing prediction for {qid}' )
continue
_lowerCAmelCase : Optional[Any] = preds[qid]
# Take max over all gold answers
_lowerCAmelCase : List[Any] = max(compute_exact(_lowerCamelCase , _lowerCamelCase ) for a in gold_answers )
_lowerCAmelCase : Dict = max(compute_fa(_lowerCamelCase , _lowerCamelCase ) for a in gold_answers )
return exact_scores, fa_scores
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : int , _lowerCamelCase : Optional[int] ) -> Any:
_lowerCAmelCase : List[str] = {}
for qid, s in scores.items():
_lowerCAmelCase : int = na_probs[qid] > na_prob_thresh
if pred_na:
_lowerCAmelCase : Any = float(not qid_to_has_ans[qid] )
else:
_lowerCAmelCase : Optional[int] = s
return new_scores
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any]=None ) -> Optional[Any]:
if not qid_list:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores.values() ) / total),
("""f1""", 100.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
_lowerCAmelCase : Union[str, Any] = len(_lowerCamelCase )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def _UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : int ) -> Any:
for k in new_eval:
_lowerCAmelCase : str = new_eval[k]
def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : List[Any] ) -> Optional[int]:
plt.step(_lowerCamelCase , _lowerCamelCase , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(_lowerCamelCase , _lowerCamelCase , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(_lowerCamelCase )
plt.savefig(_lowerCamelCase )
plt.clf()
def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=None , _lowerCamelCase : Optional[int]=None ) -> Optional[Any]:
_lowerCAmelCase : Tuple = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : na_probs[k] )
_lowerCAmelCase : List[Any] = 0.0
_lowerCAmelCase : str = 1.0
_lowerCAmelCase : Any = 0.0
_lowerCAmelCase : List[str] = [1.0]
_lowerCAmelCase : Tuple = [0.0]
_lowerCAmelCase : List[str] = 0.0
for i, qid in enumerate(_lowerCamelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
_lowerCAmelCase : Optional[int] = true_pos / float(i + 1 )
_lowerCAmelCase : List[str] = true_pos / float(_lowerCamelCase )
if i == len(_lowerCamelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_lowerCamelCase )
recalls.append(_lowerCamelCase )
if out_image:
plot_pr_curve(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return {"ap": 100.0 * avg_prec}
def _UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str] , _lowerCamelCase : int ) -> List[Any]:
if out_image_dir and not os.path.exists(_lowerCamelCase ):
os.makedirs(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
_lowerCAmelCase : str = make_precision_recall_eval(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , out_image=os.path.join(_lowerCamelCase , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
_lowerCAmelCase : Tuple = make_precision_recall_eval(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , out_image=os.path.join(_lowerCamelCase , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
_lowerCAmelCase : int = {k: float(_lowerCamelCase ) for k, v in qid_to_has_ans.items()}
_lowerCAmelCase : Optional[int] = make_precision_recall_eval(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , out_image=os.path.join(_lowerCamelCase , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(_lowerCamelCase , _lowerCamelCase , """pr_exact""" )
merge_eval(_lowerCamelCase , _lowerCamelCase , """pr_f1""" )
merge_eval(_lowerCamelCase , _lowerCamelCase , """pr_oracle""" )
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : Optional[int] ) -> Any:
if not qid_list:
return
_lowerCAmelCase : List[Any] = [na_probs[k] for k in qid_list]
_lowerCAmelCase : Optional[int] = np.ones_like(_lowerCamelCase ) / float(len(_lowerCamelCase ) )
plt.hist(_lowerCamelCase , weights=_lowerCamelCase , bins=20 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(f'Histogram of no-answer probability: {name}' )
plt.savefig(os.path.join(_lowerCamelCase , f'na_prob_hist_{name}.png' ) )
plt.clf()
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int ) -> List[Any]:
_lowerCAmelCase : str = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
_lowerCAmelCase : str = num_no_ans
_lowerCAmelCase : Union[str, Any] = cur_score
_lowerCAmelCase : Optional[Any] = 0.0
_lowerCAmelCase : Dict = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : na_probs[k] )
for i, qid in enumerate(_lowerCamelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
_lowerCAmelCase : Union[str, Any] = scores[qid]
else:
if preds[qid]:
_lowerCAmelCase : Optional[Any] = -1
else:
_lowerCAmelCase : str = 0
cur_score += diff
if cur_score > best_score:
_lowerCAmelCase : Union[str, Any] = cur_score
_lowerCAmelCase : List[str] = na_probs[qid]
return 100.0 * best_score / len(_lowerCamelCase ), best_thresh
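# The sweep above orders questions by increasing no-answer probability and
# scores the policy "answer every question up to the current position,
# abstain on the rest", remembering the threshold that maximizes the running
# total; the starting score (num_no_ans) is the abstain-on-everything
# baseline, which is correct exactly on the unanswerable questions.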
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : int ) -> Optional[int]:
_lowerCAmelCase , _lowerCAmelCase : List[str] = find_best_thresh(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = find_best_thresh(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = best_exact
_lowerCAmelCase : List[str] = exact_thresh
_lowerCAmelCase : int = best_fa
_lowerCAmelCase : int = fa_thresh
def _UpperCAmelCase ( ) -> Optional[int]:
with open(OPTS.data_file ) as f:
_lowerCAmelCase : Any = json.load(_lowerCamelCase )
_lowerCAmelCase : List[Any] = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
_lowerCAmelCase : Optional[Any] = json.load(_lowerCamelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
_lowerCAmelCase : str = json.load(_lowerCamelCase )
else:
_lowerCAmelCase : Dict = {k: 0.0 for k in preds}
_lowerCAmelCase : List[str] = make_qid_to_has_ans(_lowerCamelCase ) # maps qid to True/False
_lowerCAmelCase : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if v]
_lowerCAmelCase : List[str] = [k for k, v in qid_to_has_ans.items() if not v]
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = get_raw_scores(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[Any] = apply_no_ans_threshold(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , OPTS.na_prob_thresh )
_lowerCAmelCase : Dict = apply_no_ans_threshold(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , OPTS.na_prob_thresh )
_lowerCAmelCase : Dict = make_eval_dict(_lowerCamelCase , _lowerCamelCase )
if has_ans_qids:
_lowerCAmelCase : Optional[Any] = make_eval_dict(_lowerCamelCase , _lowerCamelCase , qid_list=_lowerCamelCase )
merge_eval(_lowerCamelCase , _lowerCamelCase , """HasAns""" )
if no_ans_qids:
_lowerCAmelCase : Union[str, Any] = make_eval_dict(_lowerCamelCase , _lowerCamelCase , qid_list=_lowerCamelCase )
merge_eval(_lowerCamelCase , _lowerCamelCase , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , OPTS.out_image_dir )
histogram_na_prob(_lowerCamelCase , _lowerCamelCase , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(_lowerCamelCase , _lowerCamelCase , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
else:
print(json.dumps(_lowerCamelCase , indent=2 ) )
if __name__ == "__main__":
UpperCamelCase_ = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = """laion/clap-htsat-unfused"""
_lowerCAmelCase : int = tempfile.mkdtemp()
def __UpperCamelCase ( self , **snake_case_ ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self , **snake_case_ ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = self.get_feature_extractor()
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : int = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
_lowerCAmelCase : Dict = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = floats_list((3, 1_0_0_0) )
_lowerCAmelCase : List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
_lowerCAmelCase : Optional[Any] = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Tuple = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = """This is a test string"""
_lowerCAmelCase : Union[str, Any] = processor(text=snake_case_ )
_lowerCAmelCase : Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = self.get_feature_extractor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[Any] = processor.batch_decode(snake_case_ )
_lowerCAmelCase : Dict = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.get_feature_extractor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
UpperCamelCase_ = ["""small""", """medium""", """large"""]
UpperCamelCase_ = """lm_head.decoder.weight"""
UpperCamelCase_ = """lm_head.weight"""
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str ) -> List[Any]:
_lowerCAmelCase : Tuple = torch.load(_lowerCamelCase )
_lowerCAmelCase : Tuple = d.pop(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
torch.save(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
UpperCamelCase_ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
UpperCamelCase_ = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl')
UpperCamelCase_ = F'./DialoGPT-{MODEL}'
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
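# In effect the conversion above appears to rename a single tensor per
# pickled state dict (d[NEW_KEY] = d.pop(OLD_KEY), turning
# "lm_head.decoder.weight" into "lm_head.weight") and then re-save it under
# WEIGHTS_NAME inside ./DialoGPT-{small,medium,large}.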
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
UpperCamelCase_ = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
UpperCamelCase_ = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class a_ (PreTrainedTokenizer ):
__lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = ["""input_ids""", """attention_mask"""]
__lowerCAmelCase : List[int] = []
__lowerCAmelCase : List[int] = []
def __init__( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<pad>" , snake_case_="<unk>" , snake_case_="m2m100" , snake_case_ = None , snake_case_=8 , **snake_case_ , ):
_lowerCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase : Optional[Any] = language_codes
_lowerCAmelCase : Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes]
_lowerCAmelCase : str = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
_lowerCAmelCase : int = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(snake_case_ )
for lang_code in fairseq_language_code
if self.get_lang_token(snake_case_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case_ , tgt_lang=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , language_codes=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Optional[int] = vocab_file
_lowerCAmelCase : Any = load_json(snake_case_ )
_lowerCAmelCase : str = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Union[str, Any] = spm_file
_lowerCAmelCase : Tuple = load_spm(snake_case_ , self.sp_model_kwargs )
_lowerCAmelCase : int = len(self.encoder )
_lowerCAmelCase : Union[str, Any] = {
self.get_lang_token(snake_case_ ): self.encoder_size + i for i, lang_code in enumerate(snake_case_ )
}
_lowerCAmelCase : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case_ )}
_lowerCAmelCase : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()}
_lowerCAmelCase : Any = src_lang if src_lang is not None else """en"""
_lowerCAmelCase : Optional[int] = tgt_lang
_lowerCAmelCase : Tuple = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_lowerCAmelCase : List[Any] = num_madeup_words
@property
def __UpperCamelCase ( self ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __UpperCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case_ , self.encoder[self.unk_token] )
def __UpperCamelCase ( self , snake_case_ ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case_ , self.unk_token )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_ ) + token
_lowerCAmelCase : Optional[Any] = []
else:
current_sub_tokens.append(snake_case_ )
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
_lowerCAmelCase : List[Any] = [1] * len(self.prefix_tokens )
_lowerCAmelCase : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCAmelCase : int = self.__dict__.copy()
_lowerCAmelCase : str = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : str = {}
_lowerCAmelCase : str = load_spm(self.spm_file , self.sp_model_kwargs )
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Dict = Path(snake_case_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , snake_case_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case_ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (str(snake_case_ ), str(snake_case_ ))
def __UpperCamelCase ( self , snake_case_ , snake_case_ = "en" , snake_case_ = None , snake_case_ = "ro" , **snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = src_lang
_lowerCAmelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase : Dict = src_lang
_lowerCAmelCase : str = self(snake_case_ , add_special_tokens=snake_case_ , **snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.get_lang_id(snake_case_ )
_lowerCAmelCase : Tuple = tgt_lang_id
return inputs
def __UpperCamelCase ( self ):
self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[Any] = self.get_lang_token(snake_case_ )
_lowerCAmelCase : List[Any] = self.lang_token_to_id[lang_token]
_lowerCAmelCase : Any = [self.cur_lang_id]
_lowerCAmelCase : Any = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = self.get_lang_token(snake_case_ )
_lowerCAmelCase : int = self.lang_token_to_id[lang_token]
_lowerCAmelCase : str = [self.cur_lang_id]
_lowerCAmelCase : str = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
return self.lang_code_to_token[lang]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[str] = self.get_lang_token(snake_case_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
_lowerCAmelCase : Optional[Any] = sentencepiece.SentencePieceProcessor(**_lowerCamelCase )
spm.Load(str(_lowerCamelCase ) )
return spm
def _UpperCAmelCase ( _lowerCamelCase : str ) -> Union[Dict, List]:
with open(_lowerCamelCase , """r""" ) as f:
return json.load(_lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : str ) -> None:
with open(_lowerCamelCase , """w""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase , indent=2 )
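# Usage sketch, assuming the conventions above: every language code maps to
# a sentinel token of the form f'__{lang_code}__', so get_lang_token("en")
# returns "__en__" and get_lang_id("en") resolves its id via
# lang_token_to_id; set_src_lang_special_tokens then makes that id the
# prefix of every encoded source sequence, with eos as the suffix.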
'''simple docstring'''
from __future__ import annotations
UpperCamelCase_ = """#"""
class a_ :
def __init__( self ):
_lowerCAmelCase : dict = {}
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : str = self._trie
for char in text:
if char not in trie:
_lowerCAmelCase : Optional[Any] = {}
_lowerCAmelCase : Dict = trie[char]
_lowerCAmelCase : Union[str, Any] = True
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[str] = self._trie
for char in prefix:
if char in trie:
_lowerCAmelCase : int = trie[char]
else:
return []
return self._elements(snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : int = []
for c, v in d.items():
_lowerCAmelCase : Optional[Any] = [""" """] if c == END else [(c + s) for s in self._elements(snake_case_ )]
result.extend(snake_case_ )
return tuple(snake_case_ )
UpperCamelCase_ = Trie()
UpperCamelCase_ = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def _UpperCAmelCase ( _lowerCamelCase : str ) -> tuple:
_lowerCAmelCase : Optional[int] = trie.find_word(_lowerCamelCase )
return tuple(string + word for word in suffixes )
def _UpperCAmelCase ( ) -> None:
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
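# A quick worked example for the word list above:
# autocomplete_using_trie("de") walks to the "de" subtree and enumerates its
# leaves, yielding ('depart ', 'detergent ', 'deer ', 'deal ') in insertion
# order; the trailing space is the stand-in that _elements substitutes for
# the END marker.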
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _UpperCAmelCase ( _lowerCamelCase : Callable , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> np.ndarray:
_lowerCAmelCase : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) )
_lowerCAmelCase : Tuple = np.zeros((n + 1,) )
_lowerCAmelCase : List[Any] = ya
_lowerCAmelCase : int = xa
for k in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = y[k] + step_size * ode_func(_lowerCamelCase , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
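# A minimal sanity check, assuming the routine above is the classic explicit
# Euler integrator (typically named explicit_euler): for y' = y with
# y(0) = 1 and step_size 0.25 on [0, 1], every step multiplies y by 1.25, so
# explicit_euler(lambda x, y: y, 1.0, 0.0, 1.0, 0.25)[-1] equals
# 1.25 ** 4 = 2.44140625, the expected first-order underestimate of
# e = 2.71828....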
'''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
UpperCamelCase_ = True
except ImportError:
UpperCamelCase_ = False
try:
from torch.hub import _get_torch_home
UpperCamelCase_ = _get_torch_home()
except ImportError:
UpperCamelCase_ = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
UpperCamelCase_ = os.path.join(torch_cache_home, """transformers""")
UpperCamelCase_ = """https://cdn.huggingface.co"""
UpperCamelCase_ = """https://s3.amazonaws.com/models.huggingface.co/bert"""
UpperCamelCase_ = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
UpperCamelCase_ = os.path.join(PATH, """config.yaml""")
UpperCamelCase_ = os.path.join(PATH, """attributes.txt""")
UpperCamelCase_ = os.path.join(PATH, """objects.txt""")
UpperCamelCase_ = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
UpperCamelCase_ = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
UpperCamelCase_ = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
UpperCamelCase_ = """pytorch_model.bin"""
UpperCamelCase_ = """config.yaml"""
def _UpperCAmelCase ( _lowerCamelCase : Any=OBJECTS , _lowerCamelCase : Dict=ATTRIBUTES ) -> Optional[int]:
_lowerCAmelCase : List[Any] = []
with open(_lowerCamelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""" )[0].lower().strip() )
_lowerCAmelCase : Union[str, Any] = []
with open(_lowerCamelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
def _UpperCAmelCase ( _lowerCamelCase : Tuple ) -> Optional[int]:
_lowerCAmelCase : Tuple = OrderedDict()
with open(_lowerCamelCase , """rb""" ) as f:
_lowerCAmelCase : int = pkl.load(_lowerCamelCase )["""model"""]
for k in copy.deepcopy(list(ckp.keys() ) ):
_lowerCAmelCase : Union[str, Any] = ckp.pop(_lowerCamelCase )
if isinstance(_lowerCamelCase , np.ndarray ):
_lowerCAmelCase : List[str] = torch.tensor(_lowerCamelCase )
else:
            assert isinstance(_lowerCamelCase , torch.Tensor ), type(_lowerCamelCase )
_lowerCAmelCase : Any = v
return r
class a_ :
__lowerCAmelCase : Dict = {}
def __init__( self , snake_case_ , snake_case_ = "root" , snake_case_=0 ):
_lowerCAmelCase : int = name
_lowerCAmelCase : Any = level
_lowerCAmelCase : Optional[Any] = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_lowerCAmelCase : Union[str, Any] = copy.deepcopy(snake_case_ )
_lowerCAmelCase : List[Any] = copy.deepcopy(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
_lowerCAmelCase : List[Any] = Config(snake_case_ , name=snake_case_ , level=level + 1 )
_lowerCAmelCase : str = v
setattr(self , snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[Any] = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : int = val
_lowerCAmelCase : Optional[Any] = val
_lowerCAmelCase : Optional[int] = key.split(""".""" )
_lowerCAmelCase : Union[str, Any] = len(snake_case_ ) - 1
_lowerCAmelCase : str = self._pointer
if len(snake_case_ ) > 1:
for i, l in enumerate(snake_case_ ):
if hasattr(self , snake_case_ ) and isinstance(getattr(self , snake_case_ ) , snake_case_ ):
setattr(getattr(self , snake_case_ ) , """.""".join(levels[i:] ) , snake_case_ )
if l == last_level:
_lowerCAmelCase : Optional[Any] = val
else:
_lowerCAmelCase : int = pointer[l]
def __UpperCamelCase ( self ):
return self._pointer
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
with open(f'{file_name}' , """w""" ) as stream:
dump(snake_case_ , snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
with open(f'{file_name}' , """w""" ) as stream:
json.dump(snake_case_ , snake_case_ )
@staticmethod
def __UpperCamelCase ( snake_case_ ):
with open(snake_case_ ) as stream:
_lowerCAmelCase : List[str] = load(snake_case_ , Loader=snake_case_ )
return data
def __str__( self ):
_lowerCAmelCase : Any = """ """
if self._name != "root":
_lowerCAmelCase : int = f'{t * (self._level-1)}{self._name}:\n'
else:
_lowerCAmelCase : str = """"""
_lowerCAmelCase : str = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(snake_case_ , snake_case_ ):
r += f'{t * (self._level)}{v}\n'
self._level += 1
else:
r += f'{t * (self._level)}{k}: {v} ({type(snake_case_ ).__name__})\n'
_lowerCAmelCase : int = level
return r[:-1]
@classmethod
def __UpperCamelCase ( cls , snake_case_ , **snake_case_ ):
_lowerCAmelCase , _lowerCAmelCase : str = cls.get_config_dict(snake_case_ , **snake_case_ )
return cls(snake_case_ )
@classmethod
def __UpperCamelCase ( cls , snake_case_ , **snake_case_ ):
_lowerCAmelCase : Tuple = kwargs.pop("""cache_dir""" , snake_case_ )
_lowerCAmelCase : List[str] = kwargs.pop("""force_download""" , snake_case_ )
_lowerCAmelCase : Optional[int] = kwargs.pop("""resume_download""" , snake_case_ )
_lowerCAmelCase : List[str] = kwargs.pop("""proxies""" , snake_case_ )
_lowerCAmelCase : int = kwargs.pop("""local_files_only""" , snake_case_ )
if os.path.isdir(snake_case_ ):
_lowerCAmelCase : int = os.path.join(snake_case_ , snake_case_ )
elif os.path.isfile(snake_case_ ) or is_remote_url(snake_case_ ):
_lowerCAmelCase : List[Any] = pretrained_model_name_or_path
else:
_lowerCAmelCase : int = hf_bucket_url(snake_case_ , filename=snake_case_ , use_cdn=snake_case_ )
try:
# Load from URL or cache if already cached
_lowerCAmelCase : Tuple = cached_path(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , local_files_only=snake_case_ , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_lowerCAmelCase : Any = Config.load_yaml(snake_case_ )
except EnvironmentError:
_lowerCAmelCase : List[str] = """Can't load config for"""
raise EnvironmentError(snake_case_ )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(snake_case_ ), kwargs
def _UpperCAmelCase ( _lowerCamelCase : List[str] ) -> Tuple:
_lowerCAmelCase : Any = torch.load("""dump.pt""" , map_location=in_tensor.device )
_lowerCAmelCase : str = in_tensor.numpy()
_lowerCAmelCase : Optional[int] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(_lowerCamelCase , _lowerCamelCase , rtol=0.01 , atol=0.1 ), (
f'{sum([1 for x in np.isclose(_lowerCamelCase , _lowerCamelCase , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_00:.4f} %'
" element-wise mismatch"
)
raise Exception("""tensors are all good""" )
# Hugging face functions below
def _UpperCAmelCase ( _lowerCamelCase : Optional[int] ) -> int:
_lowerCAmelCase : Optional[Any] = urlparse(_lowerCamelCase )
return parsed.scheme in ("http", "https")
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : List[str]=True ) -> str:
_lowerCAmelCase : Union[str, Any] = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
_lowerCAmelCase : Optional[Any] = """/""" not in model_id
if legacy_format:
return f'{endpoint}/{model_id}-{filename}'
else:
return f'{endpoint}/{model_id}/{filename}'
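# Worked example of the branch above: a bare model id such as
# "bert-base-uncased" contains no "/", so the legacy layout
# f'{endpoint}/bert-base-uncased-{filename}' applies; a namespaced id like
# "unc-nlp/frcnn-vg-finetuned" resolves to
# f'{endpoint}/unc-nlp/frcnn-vg-finetuned/{filename}'.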
def _UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=None , _lowerCamelCase : Dict=0 , _lowerCamelCase : str=None , ) -> str:
_lowerCAmelCase : Any = """python/{}""".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
ua += "; " + "; ".join("""{}/{}""".format(_lowerCamelCase , _lowerCamelCase ) for k, v in user_agent.items() )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
ua += "; " + user_agent
_lowerCAmelCase : Optional[Any] = {"""user-agent""": ua}
if resume_size > 0:
_lowerCAmelCase : List[str] = """bytes=%d-""" % (resume_size,)
_lowerCAmelCase : List[str] = requests.get(_lowerCamelCase , stream=_lowerCamelCase , proxies=_lowerCamelCase , headers=_lowerCamelCase )
if response.status_code == 4_16: # Range not satisfiable
return
_lowerCAmelCase : Tuple = response.headers.get("""Content-Length""" )
_lowerCAmelCase : List[Any] = resume_size + int(_lowerCamelCase ) if content_length is not None else None
_lowerCAmelCase : int = tqdm(
unit="""B""" , unit_scale=_lowerCamelCase , total=_lowerCamelCase , initial=_lowerCamelCase , desc="""Downloading""" , )
for chunk in response.iter_content(chunk_size=10_24 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(_lowerCamelCase ) )
temp_file.write(_lowerCamelCase )
progress.close()
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict=None , _lowerCamelCase : str=False , _lowerCamelCase : Tuple=None , _lowerCamelCase : List[str]=10 , _lowerCamelCase : int=False , _lowerCamelCase : Dict=None , _lowerCamelCase : List[str]=False , ) -> Dict:
if cache_dir is None:
_lowerCAmelCase : str = TRANSFORMERS_CACHE
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : int = str(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
_lowerCAmelCase : int = None
if not local_files_only:
try:
_lowerCAmelCase : Tuple = requests.head(_lowerCamelCase , allow_redirects=_lowerCamelCase , proxies=_lowerCamelCase , timeout=_lowerCamelCase )
if response.status_code == 2_00:
_lowerCAmelCase : Tuple = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_lowerCAmelCase : int = url_to_filename(_lowerCamelCase , _lowerCamelCase )
# get cache path to put the file
_lowerCAmelCase : List[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(_lowerCamelCase ):
return cache_path
else:
_lowerCAmelCase : Any = [
file
for file in fnmatch.filter(os.listdir(_lowerCamelCase ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(_lowerCamelCase ) > 0:
return os.path.join(_lowerCamelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(_lowerCamelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_lowerCAmelCase : Union[str, Any] = cache_path + """.lock"""
with FileLock(_lowerCamelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(_lowerCamelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_lowerCAmelCase : Optional[int] = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(_lowerCamelCase , """a+b""" ) as f:
yield f
_lowerCAmelCase : Optional[int] = _resumable_file_manager
if os.path.exists(_lowerCamelCase ):
_lowerCAmelCase : Dict = os.stat(_lowerCamelCase ).st_size
else:
_lowerCAmelCase : Tuple = 0
else:
_lowerCAmelCase : Dict = partial(tempfile.NamedTemporaryFile , dir=_lowerCamelCase , delete=_lowerCamelCase )
_lowerCAmelCase : Dict = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" , _lowerCamelCase , temp_file.name , )
http_get(
_lowerCamelCase , _lowerCamelCase , proxies=_lowerCamelCase , resume_size=_lowerCamelCase , user_agent=_lowerCamelCase , )
os.replace(temp_file.name , _lowerCamelCase )
_lowerCAmelCase : List[str] = {"""url""": url, """etag""": etag}
_lowerCAmelCase : Optional[Any] = cache_path + """.json"""
with open(_lowerCamelCase , """w""" ) as meta_file:
json.dump(_lowerCamelCase , _lowerCamelCase )
return cache_path
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : int=None ) -> List[str]:
_lowerCAmelCase : Dict = url.encode("""utf-8""" )
_lowerCAmelCase : str = shaaaa(_lowerCamelCase )
_lowerCAmelCase : List[Any] = url_hash.hexdigest()
if etag:
_lowerCAmelCase : Optional[int] = etag.encode("""utf-8""" )
_lowerCAmelCase : str = shaaaa(_lowerCamelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
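# Example of the filename scheme above (illustrative): the URL hash always forms the
# stem, the etag hash is appended when available, and a ".h5" suffix is preserved:
#
#   url_to_filename("https://example.com/weights.h5", etag="abc")
#   # -> "<sha256 of url>.<sha256 of etag>.h5"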
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : Any=None , _lowerCamelCase : str=False , _lowerCamelCase : List[str]=None , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Tuple=False , _lowerCamelCase : Any=False , ) -> List[str]:
if cache_dir is None:
_lowerCAmelCase : Union[str, Any] = TRANSFORMERS_CACHE
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Any = str(_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : List[str] = str(_lowerCamelCase )
if is_remote_url(_lowerCamelCase ):
# URL, so get it from the cache (downloading if necessary)
_lowerCAmelCase : Dict = get_from_cache(
_lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , user_agent=_lowerCamelCase , local_files_only=_lowerCamelCase , )
elif os.path.exists(_lowerCamelCase ):
# File, and it exists.
_lowerCAmelCase : Dict = url_or_filename
elif urlparse(_lowerCamelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(_lowerCamelCase ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(_lowerCamelCase ) )
if extract_compressed_file:
if not is_zipfile(_lowerCamelCase ) and not tarfile.is_tarfile(_lowerCamelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_lowerCAmelCase , _lowerCAmelCase : Tuple = os.path.split(_lowerCamelCase )
_lowerCAmelCase : Any = output_file.replace(""".""" , """-""" ) + """-extracted"""
_lowerCAmelCase : Union[str, Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.isdir(_lowerCamelCase ) and os.listdir(_lowerCamelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_lowerCAmelCase : Union[str, Any] = output_path + """.lock"""
with FileLock(_lowerCamelCase ):
shutil.rmtree(_lowerCamelCase , ignore_errors=_lowerCamelCase )
os.makedirs(_lowerCamelCase )
if is_zipfile(_lowerCamelCase ):
with ZipFile(_lowerCamelCase , """r""" ) as zip_file:
zip_file.extractall(_lowerCamelCase )
zip_file.close()
elif tarfile.is_tarfile(_lowerCamelCase ):
_lowerCAmelCase : Any = tarfile.open(_lowerCamelCase )
tar_file.extractall(_lowerCamelCase )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(_lowerCamelCase ) )
return output_path_extracted
return output_path
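# Usage sketch for the resolver above (illustrative; paths are placeholders): local
# files pass through unchanged, remote URLs are downloaded and cached, and archives
# can be unpacked next to themselves:
#
#   cached_path("./weights.bin")                              # -> "./weights.bin"
#   cached_path("./model.zip", extract_compressed_file=True)  # -> "./model-zip-extracted"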
def _UpperCAmelCase ( _lowerCamelCase : Any , _lowerCamelCase : int="," ) -> Tuple:
assert isinstance(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
with open(_lowerCamelCase ) as f:
_lowerCAmelCase : int = eval(f.read() )
else:
_lowerCAmelCase : Tuple = requests.get(_lowerCamelCase )
try:
            _lowerCAmelCase : List[str] = req.json()
except Exception:
_lowerCAmelCase : int = req.content.decode()
assert data is not None, "could not connect"
try:
_lowerCAmelCase : Tuple = eval(_lowerCamelCase )
except Exception:
_lowerCAmelCase : Optional[Any] = data.split("""\n""" )
req.close()
return data
def _UpperCAmelCase ( _lowerCamelCase : Tuple ) -> Optional[int]:
_lowerCAmelCase : List[Any] = requests.get(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _UpperCAmelCase ( _lowerCamelCase : Tuple ) -> int:
_lowerCAmelCase : Optional[int] = url.split("""/""" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(_lowerCamelCase )
with open(_lowerCamelCase , """rb""" ) as stream:
_lowerCAmelCase : int = pkl.load(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = weights.pop("""model""" )
_lowerCAmelCase : List[str] = {}
for k, v in model.items():
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_lowerCamelCase )
if "running_var" in k:
_lowerCAmelCase : int = torch.tensor([0] )
_lowerCAmelCase : str = k.replace("""running_var""" , """num_batches_tracked""" )
_lowerCAmelCase : Tuple = zero
return new
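# Illustrative effect of the loop above: a checkpoint key such as
# "backbone.bn1.running_var" also produces a "backbone.bn1.num_batches_tracked"
# entry initialised to torch.tensor([0]), which PyTorch BatchNorm layers expect.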
def _UpperCAmelCase ( ) -> int:
    print(f'{os.path.abspath(os.path.join(__file__ , os.pardir ) )}/demo.ipynb' )
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict="RGB" ) -> Optional[int]:
assert isinstance(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = cva.imread(_lowerCamelCase )
else:
_lowerCAmelCase : str = get_image_from_url(_lowerCamelCase )
assert img is not None, f'could not connect to: {im}'
_lowerCAmelCase : Any = cva.cvtColor(_lowerCamelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
_lowerCAmelCase : Tuple = img[:, :, ::-1]
return img
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=1 ) -> List[str]:
return (images[i : i + batch] for i in range(0 , len(_lowerCamelCase ) , _lowerCamelCase ))
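# Example (illustrative; `chunk` is an assumed name for the generator helper above):
#
#   list(chunk([1, 2, 3, 4, 5], 2))  # -> [[1, 2], [3, 4], [5]]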
| 309
|
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] ) -> Union[str, Any]:
# ===== initialization =====
_lowerCAmelCase : Tuple = Mock()
_lowerCAmelCase : Any = conn, Mock()
_lowerCAmelCase : Optional[Any] = iter([1, None] )
_lowerCAmelCase : str = lambda _lowerCamelCase : next(_lowerCamelCase )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=_lowerCamelCase )
    # ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 309
| 1
|
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
UpperCamelCase_ = numpy.array([0, 0])
UpperCamelCase_ = numpy.array([0.5, 0.8_6_6_0_2_5_4])
UpperCamelCase_ = numpy.array([1, 0])
UpperCamelCase_ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def _UpperCAmelCase ( _lowerCamelCase : list[numpy.ndarray] , _lowerCamelCase : int ) -> list[numpy.ndarray]:
_lowerCAmelCase : Dict = initial_vectors
for _ in range(_lowerCamelCase ):
_lowerCAmelCase : Any = iteration_step(_lowerCamelCase )
return vectors
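# Each iteration step replaces every segment with four shorter ones, so after n steps
# the initial 3 segments become 3 * 4**n (e.g. 5 iterations -> 3072 segments).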
def _UpperCAmelCase ( _lowerCamelCase : list[numpy.ndarray] ) -> list[numpy.ndarray]:
_lowerCAmelCase : Union[str, Any] = []
for i, start_vector in enumerate(vectors[:-1] ):
_lowerCAmelCase : Optional[int] = vectors[i + 1]
new_vectors.append(_lowerCamelCase )
_lowerCAmelCase : Dict = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def _UpperCAmelCase ( _lowerCamelCase : numpy.ndarray , _lowerCamelCase : float ) -> numpy.ndarray:
_lowerCAmelCase : int = numpy.radians(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Tuple = numpy.cos(_lowerCamelCase ), numpy.sin(_lowerCamelCase )
_lowerCAmelCase : List[Any] = numpy.array(((c, -s), (s, c)) )
return numpy.dot(_lowerCamelCase , _lowerCamelCase )
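# Worked example (illustrative): rotating the unit x-vector by 90 degrees
# counterclockwise yields the unit y-vector:
#
#   rotate(numpy.array([1, 0]), 90)  # -> array([0., 1.]) up to floating point error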
def _UpperCAmelCase ( _lowerCamelCase : list[numpy.ndarray] ) -> None:
_lowerCAmelCase : List[Any] = plt.gca()
axes.set_aspect("""equal""" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_lowerCAmelCase , _lowerCAmelCase : Tuple = zip(*_lowerCamelCase )
plt.plot(_lowerCamelCase , _lowerCamelCase )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase_ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 309
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_0 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1_0 , snake_case_=0.02 , snake_case_=None , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : Dict = num_patches + 1
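        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, so seq_length = 226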
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[Any] = ViTMSNModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Tuple = self.type_sequence_label_size
_lowerCAmelCase : int = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[int] = model(snake_case_ , labels=snake_case_ )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : int = 1
_lowerCAmelCase : List[str] = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ (_a , _a , unittest.TestCase ):
__lowerCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__lowerCAmelCase : Optional[int] = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Any = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = ViTMSNModelTester(self )
_lowerCAmelCase : int = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(snake_case_ )
_lowerCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def __UpperCamelCase ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[int] = ViTMSNModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
torch.manual_seed(2 )
_lowerCAmelCase : Dict = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(snake_case_ )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Dict = model(**snake_case_ )
# verify the logits
_lowerCAmelCase : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
_lowerCAmelCase : Tuple = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
| 309
| 1
|
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase_ = """src/diffusers"""
# Pattern that looks at the indentation in a line.
UpperCamelCase_ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase_ = re.compile(r"""\[([^\]]+)\]""")
def _UpperCAmelCase ( _lowerCamelCase : List[Any] ) -> str:
_lowerCAmelCase : Dict = _re_indent.search(_lowerCamelCase )
return "" if search is None else search.groups()[0]
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str]="" , _lowerCamelCase : str=None , _lowerCamelCase : List[Any]=None ) -> str:
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase ):
index += 1
_lowerCAmelCase : List[Any] = ["""\n""".join(lines[:index] )]
else:
_lowerCAmelCase : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(_lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCamelCase ) )
if index < len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Dict = []
else:
blocks.append("""\n""".join(_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase ) > 0:
blocks.append("""\n""".join(_lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] ) -> Any:
def _inner(_lowerCamelCase : Any ):
return key(_lowerCamelCase ).lower().replace("""_""" , """""" )
return _inner
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple=None ) -> Union[str, Any]:
# If no key is provided, we use a noop.
def noop(_lowerCamelCase : List[Any] ):
return x
if key is None:
_lowerCAmelCase : Union[str, Any] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : Any = [obj for obj in objects if key(_lowerCamelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Union[str, Any] = [obj for obj in objects if key(_lowerCamelCase )[0].isupper() and not key(_lowerCamelCase ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : Optional[Any] = [obj for obj in objects if not key(_lowerCamelCase )[0].isupper()]
_lowerCAmelCase : List[str] = ignore_underscore(_lowerCamelCase )
return sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : str ) -> str:
# This inner function sort imports between [ ].
def _replace(_lowerCamelCase : Union[str, Any] ):
_lowerCAmelCase : Optional[Any] = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_lowerCAmelCase : List[str] = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_lowerCamelCase )] ) + "]"
_lowerCAmelCase : Optional[int] = import_statement.split("""\n""" )
if len(_lowerCamelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Dict = 2 if lines[1].strip() == """[""" else 1
_lowerCAmelCase : Tuple = [(i, _re_strip_line.search(_lowerCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowerCAmelCase : Tuple = sort_objects(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] )
_lowerCAmelCase : Optional[Any] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_lowerCamelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : str = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Tuple = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : Dict = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + """, """.join([f'"{k}"' for k in sort_objects(_lowerCamelCase )] )
return "\n".join(_lowerCamelCase )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Dict = _re_bracket_content.sub(_replace , _lowerCamelCase )
return import_statement
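# Example (illustrative): a one-line bracketed import is reordered with constants
# first, classes second, functions last, each group sorted case-insensitively:
#
#   sort_objects_in_import('_import_structure["models"] = ["zebra", "Alpha", "CONST"]')
#   # -> '_import_structure["models"] = ["CONST", "Alpha", "zebra"]'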
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True ) -> List[str]:
with open(_lowerCamelCase , """r""" ) as f:
_lowerCAmelCase : Optional[Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(
_lowerCamelCase , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_lowerCamelCase ) - 1 ):
        # Check if the block contains some `_import_structure` entries to sort.
_lowerCAmelCase : List[str] = main_blocks[block_idx]
_lowerCAmelCase : int = block.split("""\n""" )
# Get to the start of the imports.
_lowerCAmelCase : Any = 0
while line_idx < len(_lowerCamelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
else:
line_idx += 1
if line_idx >= len(_lowerCamelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : Any = """\n""".join(block_lines[line_idx:-1] )
_lowerCAmelCase : Tuple = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
_lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(_lowerCamelCase , indent_level=_lowerCamelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : List[Any] = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : Tuple = [(pattern.search(_lowerCamelCase ).groups()[0] if pattern.search(_lowerCamelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : List[str] = [(i, key) for i, key in enumerate(_lowerCamelCase ) if key is not None]
_lowerCAmelCase : List[str] = [x[0] for x in sorted(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : List[str] = []
for i in range(len(_lowerCamelCase ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : Any = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(_lowerCamelCase )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase : str = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_lowerCamelCase ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(_lowerCamelCase , """w""" ) as f:
f.write("""\n""".join(_lowerCamelCase ) )
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any]=True ) -> Any:
_lowerCAmelCase : List[Any] = []
for root, _, files in os.walk(_lowerCamelCase ):
if "__init__.py" in files:
_lowerCAmelCase : List[Any] = sort_imports(os.path.join(_lowerCamelCase , """__init__.py""" ) , check_only=_lowerCamelCase )
if result:
_lowerCAmelCase : str = [os.path.join(_lowerCamelCase , """__init__.py""" )]
if len(_lowerCamelCase ) > 0:
raise ValueError(f'Would overwrite {len(_lowerCamelCase )} files, run `make style`.' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
UpperCamelCase_ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 309
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class a_ (_a ):
__lowerCAmelCase : List[Any] = """microsoft/speecht5_tts"""
__lowerCAmelCase : List[Any] = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
__lowerCAmelCase : List[str] = """text_reader"""
__lowerCAmelCase : Optional[Any] = SpeechTaProcessor
__lowerCAmelCase : str = SpeechTaForTextToSpeech
__lowerCAmelCase : int = SpeechTaHifiGan
__lowerCAmelCase : int = ["""text"""]
__lowerCAmelCase : int = ["""audio"""]
def __UpperCamelCase ( self ):
if self.post_processor is None:
_lowerCAmelCase : int = """microsoft/speecht5_hifigan"""
super().setup()
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : Tuple = self.pre_processor(text=snake_case_ , return_tensors="""pt""" , truncation=snake_case_ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
_lowerCAmelCase : List[str] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
_lowerCAmelCase : Any = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.model.generate_speech(**snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.post_processor(snake_case_ ).cpu().detach()
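# Minimal usage sketch (illustrative; assumes a working `transformers` agents setup,
# and `a_` is simply the obfuscated class name above):
#
#   tool = a_()
#   speech = tool("Hello, world.")  # returns a waveform tensor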
| 309
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class a_ (_a ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case_ , """tf_padding""" ) )
self.parent.assertTrue(hasattr(snake_case_ , """depth_multiplier""" ) )
class a_ :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3 , snake_case_=3_2 , snake_case_=0.25 , snake_case_=8 , snake_case_=8 , snake_case_=6 , snake_case_=3_2 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_="relu6" , snake_case_=1_2_8_0 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=True , snake_case_=True , snake_case_=1_0 , snake_case_=None , ):
_lowerCAmelCase : Tuple = parent
_lowerCAmelCase : Optional[Any] = batch_size
_lowerCAmelCase : Dict = num_channels
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : str = depth_multiplier
_lowerCAmelCase : Optional[int] = depth_divisible_by
_lowerCAmelCase : Optional[int] = min_depth
_lowerCAmelCase : Optional[int] = expand_ratio
_lowerCAmelCase : str = tf_padding
_lowerCAmelCase : List[Any] = output_stride
_lowerCAmelCase : Tuple = first_layer_is_expansion
_lowerCAmelCase : Optional[int] = finegrained_output
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : Optional[Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
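        # i.e. this stays 1280 when finegrained_output is True; with finegrained_output=False it becomes int(1280 * 0.25) = 320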
_lowerCAmelCase : Dict = classifier_dropout_prob
_lowerCAmelCase : List[Any] = use_labels
_lowerCAmelCase : Optional[int] = is_training
_lowerCAmelCase : Union[str, Any] = num_labels
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Union[str, Any] = scope
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Dict = None
if self.use_labels:
_lowerCAmelCase : Any = ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCamelCase ( self ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[Any] = MobileNetVaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : List[str] = model(snake_case_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Union[str, Any] = self.num_labels
_lowerCAmelCase : Union[str, Any] = MobileNetVaForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : str = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : Optional[int] = MobileNetVaForSemanticSegmentation(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[int] = model(snake_case_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_lowerCAmelCase : int = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = config_and_inputs
_lowerCAmelCase : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ (_a , _a , unittest.TestCase ):
__lowerCAmelCase : Dict = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__lowerCAmelCase : int = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowerCAmelCase : str = False
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Any = False
__lowerCAmelCase : Dict = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = MobileNetVaModelTester(self )
_lowerCAmelCase : List[str] = MobileNetVaConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(snake_case_ )
_lowerCAmelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Tuple = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __UpperCamelCase ( self ):
def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : str = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : str = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_lowerCAmelCase : int = outputs.hidden_states
_lowerCAmelCase : Any = 1_6
self.assertEqual(len(snake_case_ ) , snake_case_ )
_lowerCAmelCase , _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : List[Any] = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case_ )
@slow
def __UpperCamelCase ( self ):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Any = MobileNetVaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _UpperCAmelCase ( ) -> List[str]:
_lowerCAmelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ):
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.default_image_processor
_lowerCAmelCase : Union[str, Any] = prepare_img()
_lowerCAmelCase : Tuple = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : str = model(**snake_case_ )
# verify the logits
_lowerCAmelCase : Any = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , snake_case_ )
_lowerCAmelCase : int = torch.tensor([0.2445, -1.1993, 0.1905] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
@slow
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
_lowerCAmelCase : Dict = model.to(snake_case_ )
_lowerCAmelCase : Optional[Any] = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
_lowerCAmelCase : Union[str, Any] = prepare_img()
_lowerCAmelCase : Dict = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : int = model(**snake_case_ )
_lowerCAmelCase : List[Any] = outputs.logits
# verify the logits
_lowerCAmelCase : Optional[int] = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape , snake_case_ )
_lowerCAmelCase : str = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=snake_case_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case_ , atol=1E-4 ) )
| 309
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
_lowerCAmelCase : int = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("""RGB""" )
return image
def _UpperCAmelCase ( _lowerCamelCase : Any ) -> Dict:
_lowerCAmelCase : str = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] ) -> Optional[Any]:
_lowerCAmelCase : str = dct.pop(_lowerCamelCase )
_lowerCAmelCase : str = val
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ) -> Tuple:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_lowerCAmelCase : Tuple = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
_lowerCAmelCase : Optional[Any] = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
_lowerCAmelCase : int = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
_lowerCAmelCase : str = qkv_bias
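# Shape note (illustrative): for hidden size d, the concatenation above yields a qkv
# bias of length 3 * d: the learned q_bias, a zero block for k (untrained in the
# original model), then v_bias.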
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] ) -> List[Any]:
_lowerCAmelCase : str = 3_64 if """coco""" in model_name else 2_24
_lowerCAmelCase : str = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_lowerCAmelCase : int = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
_lowerCAmelCase : Union[str, Any] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
_lowerCAmelCase : Optional[int] = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_lowerCAmelCase : str = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
_lowerCAmelCase : Dict = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any]=None , _lowerCamelCase : int=False ) -> List[str]:
_lowerCAmelCase : int = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_lowerCAmelCase : List[Any] = tokenizer("""\n""" , add_special_tokens=_lowerCamelCase ).input_ids[0]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
_lowerCAmelCase : Union[str, Any] = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_lowerCAmelCase , _lowerCAmelCase : List[str] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_lowerCAmelCase : Dict = """cuda""" if torch.cuda.is_available() else """cpu"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
_lowerCAmelCase : List[Any] = original_model.state_dict()
_lowerCAmelCase : Optional[int] = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_lowerCAmelCase : Tuple = state_dict.pop(_lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
_lowerCAmelCase : List[Any] = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_lowerCAmelCase : Optional[int] = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_lowerCAmelCase : Dict = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_lowerCAmelCase : Tuple = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_lowerCAmelCase : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_lowerCAmelCase : int = key.replace("""t5""" , """language""" )
_lowerCAmelCase : Tuple = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_lowerCAmelCase : Union[str, Any] = load_demo_image()
_lowerCAmelCase : Optional[int] = vis_processors["""eval"""](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
_lowerCAmelCase : List[str] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
# create processor
_lowerCAmelCase : Optional[int] = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
_lowerCAmelCase : Tuple = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
_lowerCAmelCase : Any = processor(images=_lowerCamelCase , return_tensors="""pt""" ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
_lowerCAmelCase : Optional[Any] = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_lowerCAmelCase : Optional[Any] = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
_lowerCAmelCase : List[Any] = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_lowerCAmelCase : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_lowerCAmelCase : Dict = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_lowerCAmelCase : Any = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_lowerCAmelCase : List[Any] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
_lowerCAmelCase : Union[str, Any] = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
_lowerCAmelCase : List[Any] = original_model.generate({"""image""": original_pixel_values} )
_lowerCAmelCase : Dict = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , _lowerCamelCase )
_lowerCAmelCase : int = input_ids.shape[1]
_lowerCAmelCase : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
_lowerCAmelCase : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'nielsr/{model_name}' )
hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
UpperCamelCase_ = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
UpperCamelCase_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
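# Example invocation (illustrative; the script filename is an assumption):
#   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b --push_to_hub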
| 309
| 1
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCamelCase_ = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _UpperCAmelCase ( _lowerCamelCase : List[Any] ) -> List[str]:
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] ) -> Any:
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_lowerCAmelCase : Union[str, Any] = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase )
| 309
|
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase_ = """src/diffusers"""
# Pattern that looks at the indentation in a line.
UpperCamelCase_ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase_ = re.compile(r"""\[([^\]]+)\]""")
def _UpperCAmelCase ( _lowerCamelCase : List[Any] ) -> str:
_lowerCAmelCase : Dict = _re_indent.search(_lowerCamelCase )
return "" if search is None else search.groups()[0]
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str]="" , _lowerCamelCase : str=None , _lowerCamelCase : List[Any]=None ) -> str:
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase ):
index += 1
_lowerCAmelCase : List[Any] = ["""\n""".join(lines[:index] )]
else:
_lowerCAmelCase : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(_lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCamelCase ) )
if index < len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Dict = []
else:
blocks.append("""\n""".join(_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase ) > 0:
blocks.append("""\n""".join(_lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] ) -> Any:
def _inner(_lowerCamelCase : Any ):
return key(_lowerCamelCase ).lower().replace("""_""" , """""" )
return _inner
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple=None ) -> Union[str, Any]:
# If no key is provided, we use a noop.
def noop(_lowerCamelCase : List[Any] ):
return x
if key is None:
_lowerCAmelCase : Union[str, Any] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : Any = [obj for obj in objects if key(_lowerCamelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Union[str, Any] = [obj for obj in objects if key(_lowerCamelCase )[0].isupper() and not key(_lowerCamelCase ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : Optional[Any] = [obj for obj in objects if not key(_lowerCamelCase )[0].isupper()]
_lowerCAmelCase : List[str] = ignore_underscore(_lowerCamelCase )
return sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase )
def sort_objects_in_import(import_statement: str) -> str:
    """Sorts the imports in a single import statement."""
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` imports in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Sort the imports defined in the `_import_structure` of all inits in the repo."""
    # NOTE: `PATH_TO_TRANSFORMERS` is assumed to be defined near the top of this script.
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
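# A minimal sketch of what `sort_objects_in_import` does to a single-line entry
# (the module and object names below are hypothetical, chosen only for illustration):
# constants sort first, then classes, then functions, with underscores ignored.
#
#     sort_objects_in_import('    "models.demo": ["DemoModel", "DEMO_CONSTANT", "load_demo_weights"],')
#     # -> '    "models.demo": ["DEMO_CONSTANT", "DemoModel", "load_demo_weights"],'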
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
_lowerCAmelCase : Optional[int] = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)
_lowerCAmelCase : Optional[Any] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)
_lowerCAmelCase : Optional[Any] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args)
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)
    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs)
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs)
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
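# A minimal usage sketch (assumes network access to the public "facebook/rag-token-nq"
# checkpoint; illustrative only):
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
#     # decoding generated ids goes through the generator tokenizer:
#     # tokenizer.batch_decode(generated_ids, skip_special_tokens=True)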
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    """Implements the ReLU activation max(0, x), applied element-wise."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    # Sorts the list `a` in place, assuming integer elements.
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))
if __name__ == "__main__":
main()
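# Pigeonhole sort runs in O(n + N) time and O(N) extra space, where N is the value
# range (max - min + 1), so it only pays off when the range is small relative to
# the list length. A quick sanity check on a hypothetical input:
if __name__ == "__main__":
    data = [-3, 0, -3, 2, 1]
    pigeonhole_sort(data)
    assert data == [-3, -3, 0, 1, 2]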
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
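# The padding masks above come straight from the token ids: `ne(pad_token_id)` is
# True everywhere except at pad positions. A tiny illustration (values hypothetical,
# with pad_token_id=1 as in the tester below):
#
#     ids = torch.tensor([[5, 7, 1, 1]])
#     ids.ne(1)  # tensor([[ True,  True, False, False]])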
class MaMaaaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, encoder_layerdrop=0.0, decoder_layerdrop=0.0, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
        return MaMaaaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the squares of the first `n` natural numbers."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
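# Worked example for n = 10: the sum of the first ten squares is 10*11*21/6 = 385,
# the square of their sum is (10*11/2)**2 = 55**2 = 3025, and the difference is
# 3025 - 385 = 2640.
if __name__ == "__main__":
    assert solution(10) == 2640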
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
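# A minimal usage sketch (the checkpoint name is an assumption; any M-CTC-T repo
# would do). Labels are prepared with the `text` argument instead of the
# deprecated `as_target_processor` context manager:
#
#     processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#     batch = processor(audio=waveform, sampling_rate=16000, text="transcript")
#     # -> batch["input_features"] and batch["labels"]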
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
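# Note: this is the lazy-init pattern used throughout the library. At runtime the
# module is swapped for a `_LazyModule` that resolves names from `_import_structure`
# on first attribute access, while the `TYPE_CHECKING` branch performs the real
# imports so static checkers and IDEs see the concrete symbols. A rough sketch of
# the same idea using plain PEP 562 (illustrative only, not how `_LazyModule` is
# implemented):
#
#     import importlib
#
#     def __getattr__(name):
#         for module, names in _import_structure.items():
#             if name in names:
#                 return getattr(importlib.import_module(f".{module}", __name__), name)
#         raise AttributeError(name)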
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)
    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
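# Why the transposes above: Flax `nn.Dense` stores its kernel as (in_features,
# out_features) while PyTorch `nn.Linear.weight` is (out_features, in_features),
# and Flax `nn.Conv` kernels are (H, W, in, out) vs PyTorch's (out, in, H, W) --
# hence `.T` for 2-D kernels and `transpose(..., (3, 2, 0, 1))` for 4-D ones.
# A minimal numpy sketch of the 4-D case:
#
#     flax_kernel = np.zeros((3, 3, 16, 32))    # (H, W, in, out)
#     pt_weight = np.transpose(flax_kernel, (3, 2, 0, 1))
#     assert pt_weight.shape == (32, 16, 3, 3)  # (out, in, H, W)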
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCamelCase_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""DPTFeatureExtractor"""]
UpperCamelCase_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
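# Migration sketch: the two classes are drop-in compatible, so existing code only
# needs the class swapped (the checkpoint name below is an assumption):
#
#     # old (emits a FutureWarning):
#     # feature_extractor = DonutFeatureExtractor.from_pretrained("naver-clova-ix/donut-base")
#     # new:
#     # image_processor = DonutImageProcessor.from_pretrained("naver-clova-ix/donut-base")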
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=True,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_forward_and_backwards(self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
    def create_and_check_biogpt_for_token_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
def __UpperCamelCase ( self ):
        model = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
        model.to(torch_device )
        tokenizer = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
        tokenizer.padding_side = """left"""
        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            """Hello, my dog is a little""",
            """Today, I""",
        ]
        inputs = tokenizer(sentences , return_tensors="""pt""" , padding=True )
        input_ids = inputs["""input_ids"""].to(torch_device )
        outputs = model.generate(
            input_ids=input_ids , attention_mask=inputs["""attention_mask"""].to(torch_device ) , )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(torch_device )
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(torch_device )
        output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            """Hello, my dog is a little bit bigger than a little bit.""",
            """Today, I have a good idea of how to use the information""",
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
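    # Note: decoder-only models must be left-padded for batched generation;
    # right padding would leave pad tokens between the prompt and the generated
    # continuation, which is why padding_side is set to "left" in the test above.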
@slow
def __UpperCamelCase ( self ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def __UpperCamelCase ( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCamelCase ( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """multi_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class a_ (unittest.TestCase ):
@slow
def __UpperCamelCase ( self ):
        model = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
        input_ids = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
        output = model(input_ids )[0]
        vocab_size = 4_2_3_8_4
        expected_shape = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
def __UpperCamelCase ( self ):
        tokenizer = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
        model = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
        model.to(torch_device )
        torch.manual_seed(0 )
        tokenized = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(torch_device )
        output_ids = model.generate(
            **tokenized , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=True , )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        expected_output_str = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
        self.assertEqual(output_str , expected_output_str )
| 309
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class a_ (_a ):
__lowerCAmelCase : Dict = (DPMSolverSDEScheduler,)
__lowerCAmelCase : Dict = 1_0
def __UpperCamelCase ( self , **snake_case_ ):
        config = {
"""num_train_timesteps""": 1_1_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**snake_case_ )
return config
    def __UpperCamelCase ( self ):
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def __UpperCamelCase ( self ):
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def __UpperCamelCase ( self ):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def __UpperCamelCase ( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def __UpperCamelCase ( self ):
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
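    # Note: mps, cuda and cpu follow slightly different floating point
    # trajectories for the same seed, so each backend is compared against its
    # own reference sum/mean in the branches above.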
def __UpperCamelCase ( self ):
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler = scheduler_class(**config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
def __UpperCamelCase ( self ):
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
| 309
| 1
|
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int , right: int , array: list[int] , target: int ) -> int:
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int] , target: int ) -> int:
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int , right: int , array: list[int] , target: int ) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
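# Minimal usage sketch (illustrative values, not part of the original module):
# both variants return the index of the target in a sorted list.
def _ternary_search_demo() -> None:
    example = [1, 3, 5, 7, 9, 11]
    assert ite_ternary_search(example , 7 ) == 3
    assert rec_ternary_search(0 , len(example ) - 1 , example , 7 ) == 3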
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = [int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    resulta = ite_ternary_search(collection, target)
    resultb = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(F'Iterative search: {target} found at positions: {resulta}')
        print(F'Recursive search: {target} found at positions: {resultb}')
    else:
        print("""Not found""")
| 309
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """vocab.txt"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class a_ (_a ):
__lowerCAmelCase : Any = VOCAB_FILES_NAMES
__lowerCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[int] = ConvBertTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ):
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
_lowerCAmelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
_lowerCAmelCase : List[str] = do_lower_case
_lowerCAmelCase : str = strip_accents
_lowerCAmelCase : List[Any] = tokenize_chinese_chars
_lowerCAmelCase : List[Any] = normalizer_class(**snake_case_ )
_lowerCAmelCase : str = do_lower_case
    def __UpperCamelCase ( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def __UpperCamelCase ( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
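    # Note: for a sequence pair the layout is [CLS] A [SEP] B [SEP]; the method
    # above returns token type id 0 for every position of [CLS] A [SEP] and 1
    # for every position of B [SEP].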
    def __UpperCamelCase ( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 309
| 1
|
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]] ) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            # Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or (not alive and neighbour_count == 3)
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
def generate_images(cells: list[list[int]] , frames: int ) -> list[Image.Image]:
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new("""RGB""" , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 2_55 - cells[y][x] * 2_55
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
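# Illustrative check (not part of the original script): under the rules
# implemented above, the blinker is a period-2 oscillator, so two generations
# bring it back to its starting configuration.
def _blinker_demo() -> None:
    once = new_generation(BLINKER )
    twice = new_generation(once )
    assert twice == BLINKER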
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 309
|
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__( self ):
        self.img = """"""
        self.original_image = """"""
        self.last_list = []
        self.rem = 0
        self.L = 2_5_6
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch( self , input_file ):
        self.img = cv2.imread(input_file , 0 )
        self.original_image = copy.deepcopy(self.img )
        x , _ , _ = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="""x""" )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last )
            last = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(last )
            self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("""output_data/output.jpg""" , self.img )
    def plot_histogram( self ):
        plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
    def show_image( self ):
        cv2.imshow("""Output-Image""" , self.img )
        cv2.imshow("""Input-Image""" , self.original_image )
        cv2.waitKey(5_0_0_0 )
        cv2.destroyAllWindows()
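# What stretch() computes: the classic histogram-equalization mapping
# s_k = (L - 1) * sum_{j <= k} n_j / N, i.e. each grey level is pushed through
# the scaled cumulative histogram. A compact NumPy-only sketch of the same
# idea, assuming an 8-bit greyscale integer image (illustrative, not part of
# the original module):
def _equalize_sketch(img , levels=2_5_6 ):
    hist , _ = np.histogram(img.ravel() , bins=levels , range=(0, levels) )
    cdf = hist.cumsum() / hist.sum()
    lut = np.round((levels - 1) * cdf ).astype(img.dtype )
    return lut[img]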
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309
| 1
|
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job ):
    job_info = {}
    start = job["""started_at"""]
    end = job["""completed_at"""]
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info["""started_at"""] = start
    job_info["""completed_at"""] = end
    job_info["""duration"""] = duration_in_min
    return job_info
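# Illustrative only: with fake ISO-8601 timestamps three minutes apart, the
# helper above reports a duration of 3.
def _job_time_demo() -> None:
    fake_job = {"""started_at""": """2023-01-01T10:00:00Z""", """completed_at""": """2023-01-01T10:03:00Z"""}
    assert extract_time_from_single_job(fake_job )["""duration"""] == 3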
def get_job_time(workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url , headers=headers ).json()
    job_time = {}
    try:
        job_time.update({job["""name"""]: extract_time_from_single_job(job ) for job in result["""jobs"""]} )
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f'&page={i + 2}' , headers=headers ).json()
            job_time.update({job["""name"""]: extract_time_from_single_job(job ) for job in result["""jobs"""]} )
        return job_time
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
        return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'{k}: {v["duration"]}')
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a_ (_a , _a , unittest.TestCase ):
__lowerCAmelCase : List[str] = IFInpaintingPipeline
__lowerCAmelCase : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
__lowerCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowerCAmelCase : Dict = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __UpperCamelCase ( self ):
return self._get_dummy_components()
    def __UpperCamelCase ( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
_lowerCAmelCase : Any = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __UpperCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __UpperCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __UpperCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __UpperCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __UpperCamelCase ( self ):
self._test_save_load_local()
def __UpperCamelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 309
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
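# Sketch of the two heuristics the Node class below switches between:
# Manhattan distance |dx| + |dy| when HEURISTIC == 1, otherwise Euclidean
# sqrt(dx**2 + dy**2). For dx=3, dy=4 they give 7 and 5.0 respectively.
# (Illustrative helper, not part of the original module.)
def _heuristic_demo(dx: int , dy: int ) -> tuple[int, float]:
    return abs(dx ) + abs(dy ), sqrt(dx**2 + dy**2 )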
class a_ :
    def __init__( self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
def __UpperCamelCase ( self ):
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
else:
return sqrt(dy**2 + dx**2 )
    def __lt__( self , other ):
return self.f_cost < other.f_cost
class a_ :
    def __init__( self , start , goal ):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , None )
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False
    def search( self ):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
return [self.start.pos]
    def get_successors( self , parent ):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path( self , node ):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
path.reverse()
return path
class a_ :
    def __init__( self , start , goal ):
        self.fwd_astar = AStar(start , goal )
        self.bwd_astar = AStar(goal , start )
        self.reached = False
    def search( self ):
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )
        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path( self , fwd_node , bwd_node ):
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(F'AStar execution time = {end_time:f} seconds')
    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 309
| 1
|
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class a_ :
def __init__( self , snake_case_ , snake_case_=1_4 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=9_9 , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=1_6 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ):
_lowerCAmelCase : int = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : Optional[int] = seq_length
_lowerCAmelCase : List[Any] = is_training
_lowerCAmelCase : Tuple = use_token_type_ids
_lowerCAmelCase : Union[str, Any] = use_input_mask
_lowerCAmelCase : Tuple = use_labels
_lowerCAmelCase : Union[str, Any] = use_mc_token_ids
_lowerCAmelCase : str = vocab_size
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Any = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : Tuple = intermediate_size
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : int = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : List[Any] = num_labels
_lowerCAmelCase : str = num_choices
_lowerCAmelCase : Optional[Any] = scope
_lowerCAmelCase : List[str] = self.vocab_size - 1
def __UpperCamelCase ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __UpperCamelCase ( self ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , *snake_case_ ):
_lowerCAmelCase : Any = CTRLModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
model(snake_case_ , token_type_ids=snake_case_ , head_mask=snake_case_ )
model(snake_case_ , token_type_ids=snake_case_ )
_lowerCAmelCase : int = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , *snake_case_ ):
_lowerCAmelCase : Optional[Any] = CTRLLMHeadModel(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Dict = model(snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
        return config, inputs_dict
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , *snake_case_ ):
_lowerCAmelCase : str = self.num_labels
_lowerCAmelCase : str = CTRLForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Union[str, Any] = model(snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class a_ (_a , _a , _a , unittest.TestCase ):
__lowerCAmelCase : int = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
__lowerCAmelCase : int = (CTRLLMHeadModel,) if is_torch_available() else ()
__lowerCAmelCase : Union[str, Any] = (
{
"""feature-extraction""": CTRLModel,
"""text-classification""": CTRLForSequenceClassification,
"""text-generation""": CTRLLMHeadModel,
"""zero-shot""": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase : int = True
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : int = False
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def __UpperCamelCase ( self ):
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=3_7 )
def __UpperCamelCase ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )
    def __UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
@slow
def __UpperCamelCase ( self ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def __UpperCamelCase ( self ):
pass
@require_torch
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def __UpperCamelCase ( self ):
        model = CTRLLMHeadModel.from_pretrained("""ctrl""" )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=torch_device )  # Legal the president is
        expected_output_ids = [
1_1_8_5_9,
0,
1_6_1_1,
8,
5,
1_5_0,
2_6_4_4_9,
2,
1_9,
3_4_8,
4_6_9,
3,
2_5_9_5,
4_8,
2_0_7_4_0,
2_4_6_5_3_3,
2_4_6_5_3_3,
1_9,
3_0,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 309
|
'''simple docstring'''
def find_minimum_change(denominations: list[int] , value: str ) -> list[int]:
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
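# Worked example (illustrative, not part of the original script): greedy
# change-making over the default denominations below is optimal because they
# form a canonical coin system. 93 = 50 + 20 + 20 + 2 + 1.
def _change_demo() -> None:
    assert find_minimum_change([1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00] , """93""" ) == [50, 20, 20, 2, 1]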
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = """0"""
    if (
        input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
        == "y"
    ):
        n = int(input("""Enter the number of denominations you want to add: """).strip())
        for i in range(0, n):
            denominations.append(int(input(F'Denomination {i}: ').strip()))
        value = input("""Enter the change you want to make in Indian Currency: """).strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
        value = input("""Enter the change you want to make: """).strip()
    if int(value) == 0 or int(value) < 0:
        print("""The total value cannot be zero or negative.""")
    else:
        print(F'Following is minimal change for {value}: ')
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 309
| 1
|
'''simple docstring'''
import functools
def mincost_tickets(days: list[int] , costs: list[int] ) -> int:
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("""The parameter days should be a list of integers""" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("""The parameter costs should be a list of three integers""" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("""All days elements should be greater than 0""" )
    if max(days ) >= 3_66:
        raise ValueError("""All days elements should be less than 366""" )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index: int ) -> int:
        if index > 3_65:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
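# Illustrative check (added example; the classic instance of this problem):
# a 1-day pass on day 1, a 7-day pass covering days 4-10, and a 1-day pass on
# day 20 yield the minimum cost of 11.
def _ticket_demo() -> None:
    assert mincost_tickets([1, 4, 6, 7, 8, 20] , [2, 7, 15] ) == 11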
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
'''simple docstring'''
import re
def _UpperCAmelCase ( dna : str ) -> str:
    if len(re.findall("""[ATCG]""" , dna ) ) != len(dna ):
        raise ValueError("""Invalid Strand""" )
    return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
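# Illustrative check (added; the complement function above keeps the name it
# has in this file): the complement maps A<->T and C<->G.
def _dna_demo() -> None:
    assert _UpperCAmelCase("""ATCG""" ) == """TAGC"""
    assert _UpperCAmelCase("""GGCTA""" ) == """CCGAT"""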
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = """laion/clap-htsat-unfused"""
_lowerCAmelCase : int = tempfile.mkdtemp()
def __UpperCamelCase ( self , **snake_case_ ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self , **snake_case_ ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
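        # The round-trip above verifies that save_pretrained / from_pretrained
        # preserve both the tokenizer vocabulary and the feature extractor
        # configuration.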
def __UpperCamelCase ( self ):
        processor = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False , padding_value=1.0 )
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        raw_speech = floats_list((3, 1_0_0_0) )
        input_feat_extract = feature_extractor(raw_speech , return_tensors="""np""" )
        input_processor = processor(audios=raw_speech , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        input_str = """This is a test string"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
def __UpperCamelCase ( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 309
| 1
|
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class a_ (_a ):
# to overwrite at feature extractactor specific tests
__lowerCAmelCase : int = None
__lowerCAmelCase : Optional[Any] = None
@property
def __UpperCamelCase ( self ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def __UpperCamelCase ( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feat_extract , """feature_size""" ) )
        self.assertTrue(hasattr(feat_extract , """sampling_rate""" ) )
        self.assertTrue(hasattr(feat_extract , """padding_value""" ) )
def __UpperCamelCase ( self ):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(x ) == len(y ) for x, y in zip(speech_inputs , processed_features[input_name] ) ) )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def __UpperCamelCase ( self ):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def __UpperCamelCase ( self ):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def __UpperCamelCase ( self , snake_case_=False ):
        def _inputs_have_equal_length(input ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
                    return False
            return True
        def _inputs_are_equal(input_a , input_b ):
            if len(input_a ) != len(input_b ):
                return False
            for input_slice_a, input_slice_b in zip(input_a , input_b ):
                if not np.allclose(np.asarray(input_slice_a ) , np.asarray(input_slice_b ) , atol=1E-3 ):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case_ )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_lowerCAmelCase : Union[str, Any] = feat_extract.pad(snake_case_ , padding=snake_case_ )
_lowerCAmelCase : Tuple = input_a[input_name]
_lowerCAmelCase : Optional[Any] = feat_extract.pad(snake_case_ , padding="""longest""" )
_lowerCAmelCase : List[str] = input_a[input_name]
_lowerCAmelCase : Tuple = feat_extract.pad(snake_case_ , padding="""max_length""" , max_length=len(speech_inputs[-1] ) )
_lowerCAmelCase : int = input_a[input_name]
_lowerCAmelCase : Optional[int] = feat_extract.pad(snake_case_ , padding="""longest""" , return_tensors="""np""" )
_lowerCAmelCase : Tuple = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(snake_case_ ):
feat_extract.pad(snake_case_ , padding="""max_length""" )[input_name]
_lowerCAmelCase : Any = feat_extract.pad(
snake_case_ , padding="""max_length""" , max_length=snake_case_ , return_tensors="""np""" )
_lowerCAmelCase : Any = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(snake_case_ ) )
self.assertTrue(_inputs_have_equal_length(snake_case_ ) )
self.assertTrue(_inputs_have_equal_length(snake_case_ ) )
self.assertTrue(_inputs_are_equal(snake_case_ , snake_case_ ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
_lowerCAmelCase : int = feat_extract.pad(snake_case_ , pad_to_multiple_of=1_0 )
_lowerCAmelCase : str = input_a[input_name]
_lowerCAmelCase : str = feat_extract.pad(snake_case_ , padding="""longest""" , pad_to_multiple_of=1_0 )
_lowerCAmelCase : Dict = input_a[input_name]
_lowerCAmelCase : Dict = feat_extract.pad(
snake_case_ , padding="""max_length""" , pad_to_multiple_of=1_0 , max_length=snake_case_ )
_lowerCAmelCase : str = input_a[input_name]
_lowerCAmelCase : Any = feat_extract.pad(
snake_case_ , padding="""max_length""" , pad_to_multiple_of=1_0 , max_length=snake_case_ , return_tensors="""np""" , )
_lowerCAmelCase : List[str] = input_a[input_name]
self.assertTrue(all(len(snake_case_ ) % 1_0 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(snake_case_ , snake_case_ ) )
_lowerCAmelCase : Optional[int] = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0
self.assertTrue(all(len(snake_case_ ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
_lowerCAmelCase : Optional[int] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def __UpperCamelCase ( self , snake_case_=False ):
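# Shared truncation checks: truncating to the shortest and middle inputs, invalid
# truncation/padding combinations, and truncation combined with pad_to_multiple_of.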
def _inputs_have_equal_length(snake_case_ ):
_lowerCAmelCase : Union[str, Any] = len(input[0] )
for input_slice in input[1:]:
if len(snake_case_ ) != length:
return False
return True
def _inputs_are_equal(snake_case_ , snake_case_ ):
if len(snake_case_ ) != len(snake_case_ ):
return False
for input_slice_a, input_slice_a in zip(snake_case_ , snake_case_ ):
if not np.allclose(np.asarray(snake_case_ ) , np.asarray(snake_case_ ) , atol=1E-3 ):
return False
return True
_lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase : Dict = self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case_ )
_lowerCAmelCase : Dict = feat_extract.model_input_names[0]
_lowerCAmelCase : List[Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
_lowerCAmelCase : Dict = feat_extract.pad(
snake_case_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) , truncation=snake_case_ )
_lowerCAmelCase : Optional[int] = input_a[input_name]
_lowerCAmelCase : Optional[int] = feat_extract.pad(snake_case_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) )
_lowerCAmelCase : Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case_ ) )
self.assertFalse(_inputs_have_equal_length(snake_case_ ) )
# truncate to smallest with np
_lowerCAmelCase : Dict = feat_extract.pad(
snake_case_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" , truncation=snake_case_ , )
_lowerCAmelCase : int = input_a[input_name]
_lowerCAmelCase : List[Any] = feat_extract.pad(
snake_case_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" )
_lowerCAmelCase : Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case_ ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than the longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case_ ) )
# truncate to middle
_lowerCAmelCase : Optional[int] = feat_extract.pad(
snake_case_ , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=snake_case_ , return_tensors="""np""" , )
_lowerCAmelCase : List[Any] = input_a[input_name]
_lowerCAmelCase : Union[str, Any] = feat_extract.pad(
snake_case_ , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=snake_case_ )
_lowerCAmelCase : List[Any] = input_a[input_name]
_lowerCAmelCase : Any = feat_extract.pad(
snake_case_ , padding="""max_length""" , max_length=len(speech_inputs[1] ) , return_tensors="""np""" )
_lowerCAmelCase : List[Any] = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(snake_case_ ) )
self.assertTrue(_inputs_have_equal_length(snake_case_ ) )
self.assertTrue(_inputs_are_equal(snake_case_ , snake_case_ ) )
# since truncation forces padding to be smaller than the longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case_ ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case_ ):
feat_extract.pad(snake_case_ , truncation=snake_case_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case_ ):
feat_extract.pad(snake_case_ , padding="""longest""" , truncation=snake_case_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case_ ):
feat_extract.pad(snake_case_ , padding="""longest""" , truncation=snake_case_ )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(snake_case_ ):
feat_extract.pad(snake_case_ , padding="""max_length""" , truncation=snake_case_ )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
_lowerCAmelCase : int = 1_2
_lowerCAmelCase : Optional[Any] = feat_extract.pad(
snake_case_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=snake_case_ , truncation=snake_case_ , )
_lowerCAmelCase : Any = input_a[input_name]
_lowerCAmelCase : List[Any] = feat_extract.pad(
snake_case_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=snake_case_ , )
_lowerCAmelCase : str = input_a[input_name]
# retrieve expected_length as a multiple of pad_to_multiple_of
_lowerCAmelCase : List[Any] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
_lowerCAmelCase : Dict = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(snake_case_ ) )
self.assertFalse(_inputs_have_equal_length(snake_case_ ) )
def __UpperCamelCase ( self ):
self._check_padding(numpify=snake_case_ )
def __UpperCamelCase ( self ):
self._check_padding(numpify=snake_case_ )
def __UpperCamelCase ( self ):
self._check_truncation(numpify=snake_case_ )
def __UpperCamelCase ( self ):
self._check_truncation(numpify=snake_case_ )
@require_torch
def __UpperCamelCase ( self ):
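# Outputs padded with return_tensors="np" and return_tensors="pt" should agree numerically.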
_lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase : Dict = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase : List[Any] = feat_extract.model_input_names[0]
_lowerCAmelCase : Optional[Any] = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase : Dict = feat_extract.pad(snake_case_ , padding="""longest""" , return_tensors="""np""" )[input_name]
_lowerCAmelCase : str = feat_extract.pad(snake_case_ , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase : Dict = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase : Tuple = feat_extract.model_input_names[0]
_lowerCAmelCase : Tuple = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase : List[Any] = feat_extract.pad(snake_case_ , padding="""longest""" , return_tensors="""np""" )[input_name]
_lowerCAmelCase : Optional[Any] = feat_extract.pad(snake_case_ , padding="""longest""" , return_tensors="""tf""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def __UpperCamelCase ( self ):
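# attention_mask should match the padded feature shape, with each row summing to the
# corresponding input's true (unpadded) length.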
_lowerCAmelCase : List[Any] = self.feat_extract_dict
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**snake_case_ )
_lowerCAmelCase : Dict = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase : List[Any] = [len(snake_case_ ) for x in speech_inputs]
_lowerCAmelCase : Tuple = feat_extract.model_input_names[0]
_lowerCAmelCase : Any = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase : Tuple = feat_extract.pad(snake_case_ , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , snake_case_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , snake_case_ )
def __UpperCamelCase ( self ):
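# When padding/truncating everything to the shortest input, every attention_mask row
# sums to that common max_length.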
_lowerCAmelCase : Optional[Any] = self.feat_extract_dict
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : int = self.feature_extraction_class(**snake_case_ )
_lowerCAmelCase : int = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase : Any = [len(snake_case_ ) for x in speech_inputs]
_lowerCAmelCase : Union[str, Any] = feat_extract.model_input_names[0]
_lowerCAmelCase : Dict = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase : List[Any] = min(snake_case_ )
_lowerCAmelCase : Optional[int] = feat_extract.pad(
snake_case_ , padding="""max_length""" , max_length=snake_case_ , truncation=snake_case_ , return_tensors="""np""" )
self.assertIn("""attention_mask""" , snake_case_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 309
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
UpperCamelCase_ = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
UpperCamelCase_ = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class a_ (_a ):
__lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = ["""input_ids""", """attention_mask"""]
__lowerCAmelCase : List[int] = []
__lowerCAmelCase : List[int] = []
def __init__( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<pad>" , snake_case_="<unk>" , snake_case_="m2m100" , snake_case_ = None , snake_case_=8 , **snake_case_ , ):
_lowerCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase : Optional[Any] = language_codes
_lowerCAmelCase : Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes]
_lowerCAmelCase : str = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
_lowerCAmelCase : int = kwargs.get("""additional_special_tokens""" , [] )
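# Register each language token (e.g. "__en__") as an additional special token, skipping duplicates.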
kwargs["additional_special_tokens"] += [
self.get_lang_token(snake_case_ )
for lang_code in fairseq_language_code
if self.get_lang_token(snake_case_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case_ , tgt_lang=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , language_codes=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Optional[int] = vocab_file
_lowerCAmelCase : Any = load_json(snake_case_ )
_lowerCAmelCase : str = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Union[str, Any] = spm_file
_lowerCAmelCase : Tuple = load_spm(snake_case_ , self.sp_model_kwargs )
_lowerCAmelCase : int = len(self.encoder )
_lowerCAmelCase : Union[str, Any] = {
self.get_lang_token(snake_case_ ): self.encoder_size + i for i, lang_code in enumerate(snake_case_ )
}
_lowerCAmelCase : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case_ )}
_lowerCAmelCase : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()}
_lowerCAmelCase : Any = src_lang if src_lang is not None else """en"""
_lowerCAmelCase : Optional[int] = tgt_lang
_lowerCAmelCase : Tuple = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_lowerCAmelCase : List[Any] = num_madeup_words
@property
def __UpperCamelCase ( self ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __UpperCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case_ , self.encoder[self.unk_token] )
def __UpperCamelCase ( self , snake_case_ ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case_ , self.unk_token )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_ ) + token
_lowerCAmelCase : Optional[Any] = []
else:
current_sub_tokens.append(snake_case_ )
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
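# Special-tokens mask: 1 for the language-code prefix and EOS suffix tokens, 0 for regular sequence tokens.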
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
_lowerCAmelCase : List[Any] = [1] * len(self.prefix_tokens )
_lowerCAmelCase : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCAmelCase : int = self.__dict__.copy()
_lowerCAmelCase : str = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : str = {}
_lowerCAmelCase : str = load_spm(self.spm_file , self.sp_model_kwargs )
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Dict = Path(snake_case_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , snake_case_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case_ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (str(snake_case_ ), str(snake_case_ ))
def __UpperCamelCase ( self , snake_case_ , snake_case_ = "en" , snake_case_ = None , snake_case_ = "ro" , **snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = src_lang
_lowerCAmelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase : Dict = src_lang
_lowerCAmelCase : str = self(snake_case_ , add_special_tokens=snake_case_ , **snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.get_lang_id(snake_case_ )
_lowerCAmelCase : Tuple = tgt_lang_id
return inputs
def __UpperCamelCase ( self ):
self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self , snake_case_ ):
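# Encoder format: prefix the source-language token and suffix EOS.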
_lowerCAmelCase : Optional[Any] = self.get_lang_token(snake_case_ )
_lowerCAmelCase : List[Any] = self.lang_token_to_id[lang_token]
_lowerCAmelCase : Any = [self.cur_lang_id]
_lowerCAmelCase : Any = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
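# Decoder format: prefix the target-language token and suffix EOS.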
_lowerCAmelCase : Any = self.get_lang_token(snake_case_ )
_lowerCAmelCase : int = self.lang_token_to_id[lang_token]
_lowerCAmelCase : str = [self.cur_lang_id]
_lowerCAmelCase : str = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
return self.lang_code_to_token[lang]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[str] = self.get_lang_token(snake_case_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
_lowerCAmelCase : Optional[Any] = sentencepiece.SentencePieceProcessor(**_lowerCamelCase )
spm.Load(str(_lowerCamelCase ) )
return spm
def _UpperCAmelCase ( _lowerCamelCase : str ) -> Union[Dict, List]:
with open(_lowerCamelCase , """r""" ) as f:
return json.load(_lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : str ) -> None:
with open(_lowerCamelCase , """w""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase , indent=2 )
| 309
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _UpperCAmelCase ( _lowerCamelCase : Callable , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> np.ndarray:
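# Explicit (forward) Euler: y[k + 1] = y[k] + step_size * ode_func(x[k], y[k]) over
# ceil((x_end - xa) / step_size) uniform steps.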
_lowerCAmelCase : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) )
_lowerCAmelCase : Tuple = np.zeros((n + 1,) )
_lowerCAmelCase : List[Any] = ya
_lowerCAmelCase : int = xa
for k in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = y[k] + step_size * ode_func(_lowerCamelCase , y[k] )
x += step_size
return y
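# Minimal usage sketch (hypothetical; argument order assumed to be
# (ode_func, y0, x0, step_size, x_end), matching the classic explicit-Euler API):
# ys = _UpperCAmelCase(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
# ys[-1] # ~2.7048, slightly undershooting e = 2.71828..., as forward Euler does for y' = y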
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a_ (unittest.TestCase ):
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Optional[Any] = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.dummy_uncond_unet
_lowerCAmelCase : Optional[int] = DDIMScheduler()
_lowerCAmelCase : List[str] = self.dummy_vq_model
_lowerCAmelCase : Tuple = LDMPipeline(unet=snake_case_ , vqvae=snake_case_ , scheduler=snake_case_ )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : int = torch.manual_seed(0 )
_lowerCAmelCase : int = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" ).images
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=snake_case_ )[0]
_lowerCAmelCase : str = image[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_lowerCAmelCase : int = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
_lowerCAmelCase : Union[str, Any] = 1E-2 if torch_device != """mps""" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : int = torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = ldm(generator=snake_case_ , num_inference_steps=5 , output_type="""numpy""" ).images
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
_lowerCAmelCase : Dict = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
_lowerCAmelCase : Union[str, Any] = 1E-2 if torch_device != """mps""" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 309
|
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] ) -> Union[str, Any]:
# ===== initialization =====
_lowerCAmelCase : Tuple = Mock()
_lowerCAmelCase : Any = conn, Mock()
_lowerCAmelCase : Optional[Any] = iter([1, None] )
_lowerCAmelCase : str = lambda _lowerCamelCase : next(_lowerCamelCase )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=_lowerCamelCase )
# ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 309
| 1
|
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : list[str] | None = None ) -> list[list[str]]:
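# Dynamic programming over prefix lengths: table[i] collects every way to build target[:i] from the word bank.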
_lowerCAmelCase : List[Any] = word_bank or []
# create a table
_lowerCAmelCase : int = len(_lowerCamelCase ) + 1
_lowerCAmelCase : list[list[list[str]]] = []
for _ in range(_lowerCamelCase ):
table.append([] )
# seed value
_lowerCAmelCase : Optional[Any] = [[]] # the empty prefix has exactly one construction: the empty combination
# iterate through the indices
for i in range(_lowerCamelCase ):
# only extend indices that are already reachable
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(_lowerCamelCase )] == word:
_lowerCAmelCase : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to table[i + len(word)]
table[i + len(_lowerCamelCase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(_lowerCamelCase )]:
combination.reverse()
return table[len(_lowerCamelCase )]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
| 309
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_0 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1_0 , snake_case_=0.02 , snake_case_=None , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : Dict = num_patches + 1
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[Any] = ViTMSNModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Tuple = self.type_sequence_label_size
_lowerCAmelCase : int = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[int] = model(snake_case_ , labels=snake_case_ )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : int = 1
_lowerCAmelCase : List[str] = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ (_a , _a , unittest.TestCase ):
__lowerCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__lowerCAmelCase : Optional[int] = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Any = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = ViTMSNModelTester(self )
_lowerCAmelCase : int = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(snake_case_ )
_lowerCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def __UpperCamelCase ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[int] = ViTMSNModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
torch.manual_seed(2 )
_lowerCAmelCase : Dict = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(snake_case_ )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Dict = model(**snake_case_ )
# verify the logits
_lowerCAmelCase : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
_lowerCAmelCase : Tuple = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
| 309
| 1
|
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
UpperCamelCase_ = None
try:
import msvcrt
except ImportError:
UpperCamelCase_ = None
try:
import fcntl
except ImportError:
UpperCamelCase_ = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
UpperCamelCase_ = OSError
# Data
# ------------------------------------------------
UpperCamelCase_ = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
UpperCamelCase_ = """3.0.12"""
UpperCamelCase_ = None
def _UpperCAmelCase ( ) -> Union[str, Any]:
global _logger
_lowerCAmelCase : int = _logger or logging.getLogger(__name__ )
return _logger
class a_ (_a ):
def __init__( self , snake_case_ ):
_lowerCAmelCase : List[Any] = lock_file
return None
def __str__( self ):
_lowerCAmelCase : Optional[Any] = f'The file lock \'{self.lock_file}\' could not be acquired.'
return temp
class a_ :
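# Proxy returned by acquire() so that "with lock.acquire(...):" releases the lock on exit.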
def __init__( self , snake_case_ ):
_lowerCAmelCase : Dict = lock
return None
def __enter__( self ):
return self.lock
def __exit__( self , snake_case_ , snake_case_ , snake_case_ ):
self.lock.release()
return None
class a_ :
def __init__( self , snake_case_ , snake_case_=-1 , snake_case_=None ):
_lowerCAmelCase : Union[str, Any] = max_filename_length if max_filename_length is not None else 2_5_5
# Hash the filename if it's too long
_lowerCAmelCase : Optional[Any] = self.hash_filename_if_too_long(snake_case_ , snake_case_ )
# The path to the lock file.
_lowerCAmelCase : Optional[int] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# It is not None only while this object holds the lock.
_lowerCAmelCase : Tuple = None
# The default timeout value.
_lowerCAmelCase : Optional[Any] = timeout
# We use this lock primarily for the lock counter.
_lowerCAmelCase : Optional[Any] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released when this value reaches 0 again.
_lowerCAmelCase : List[str] = 0
return None
@property
def __UpperCamelCase ( self ):
return self._lock_file
@property
def __UpperCamelCase ( self ):
return self._timeout
@timeout.setter
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[Any] = float(snake_case_ )
return None
def __UpperCamelCase ( self ):
raise NotImplementedError()
def __UpperCamelCase ( self ):
raise NotImplementedError()
@property
def __UpperCamelCase ( self ):
return self._lock_file_fd is not None
def __UpperCamelCase ( self , snake_case_=None , snake_case_=0.05 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
_lowerCAmelCase : Tuple = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
_lowerCAmelCase : Optional[int] = id(self )
_lowerCAmelCase : List[Any] = self._lock_file
_lowerCAmelCase : Tuple = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}' )
self._acquire()
if self.is_locked:
logger().debug(f'Lock {lock_id} acquired on {lock_filename}' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' )
time.sleep(snake_case_ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
_lowerCAmelCase : Dict = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def __UpperCamelCase ( self , snake_case_=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
_lowerCAmelCase : Any = id(self )
_lowerCAmelCase : List[str] = self._lock_file
logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}' )
self._release()
_lowerCAmelCase : Tuple = 0
logger().debug(f'Lock {lock_id} released on {lock_filename}' )
return None
def __enter__( self ):
self.acquire()
return self
def __exit__( self , snake_case_ , snake_case_ , snake_case_ ):
self.release()
return None
def __del__( self ):
self.release(force=snake_case_ )
return None
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
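# Keep lock paths within the filesystem's name limit: over-long basenames become "<prefix>...<hash>.lock".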
_lowerCAmelCase : Dict = os.path.basename(snake_case_ )
if len(snake_case_ ) > max_length and max_length > 0:
_lowerCAmelCase : Union[str, Any] = os.path.dirname(snake_case_ )
_lowerCAmelCase : Any = str(hash(snake_case_ ) )
_lowerCAmelCase : Optional[Any] = filename[: max_length - len(snake_case_ ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(snake_case_ , snake_case_ )
else:
return path
class a_ (_a ):
def __init__( self , snake_case_ , snake_case_=-1 , snake_case_=None ):
from .file_utils import relative_to_absolute_path
super().__init__(snake_case_ , timeout=snake_case_ , max_filename_length=snake_case_ )
_lowerCAmelCase : List[str] = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
_lowerCAmelCase : Optional[Any] = os.open(self._lock_file , snake_case_ )
except OSError:
pass
else:
try:
msvcrt.locking(snake_case_ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(snake_case_ )
else:
_lowerCAmelCase : Union[str, Any] = fd
return None
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self._lock_file_fd
_lowerCAmelCase : List[str] = None
msvcrt.locking(snake_case_ , msvcrt.LK_UNLCK , 1 )
os.close(snake_case_ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class a_ (_a ):
def __init__( self , snake_case_ , snake_case_=-1 , snake_case_=None ):
_lowerCAmelCase : Union[str, Any] = os.statvfs(os.path.dirname(snake_case_ ) ).f_namemax
super().__init__(snake_case_ , timeout=snake_case_ , max_filename_length=snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = os.O_RDWR | os.O_CREAT | os.O_TRUNC
_lowerCAmelCase : List[str] = os.open(self._lock_file , snake_case_ )
try:
fcntl.flock(snake_case_ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(snake_case_ )
else:
_lowerCAmelCase : Optional[Any] = fd
return None
def __UpperCamelCase ( self ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
_lowerCAmelCase : Union[str, Any] = self._lock_file_fd
_lowerCAmelCase : Optional[Any] = None
fcntl.flock(snake_case_ , fcntl.LOCK_UN )
os.close(snake_case_ )
return None
class a_ (_a ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
_lowerCAmelCase : List[Any] = os.open(self._lock_file , snake_case_ )
except OSError:
pass
else:
_lowerCAmelCase : List[str] = fd
return None
def __UpperCamelCase ( self ):
os.close(self._lock_file_fd )
_lowerCAmelCase : List[str] = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
UpperCamelCase_ = None
if msvcrt:
UpperCamelCase_ = WindowsFileLock
elif fcntl:
UpperCamelCase_ = UnixFileLock
else:
UpperCamelCase_ = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
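# Minimal usage sketch (FileLock resolves to the platform class chosen above):
# with FileLock("resource.txt.lock", timeout=10):
#     ... # exclusive section; released on exit, Timeout raised if not acquired in time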
| 309
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class a_ (_a ):
__lowerCAmelCase : List[Any] = """microsoft/speecht5_tts"""
__lowerCAmelCase : List[Any] = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
__lowerCAmelCase : List[str] = """text_reader"""
__lowerCAmelCase : Optional[Any] = SpeechTaProcessor
__lowerCAmelCase : str = SpeechTaForTextToSpeech
__lowerCAmelCase : int = SpeechTaHifiGan
__lowerCAmelCase : int = ["""text"""]
__lowerCAmelCase : int = ["""audio"""]
def __UpperCamelCase ( self ):
if self.post_processor is None:
_lowerCAmelCase : int = """microsoft/speecht5_hifigan"""
super().setup()
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : Tuple = self.pre_processor(text=snake_case_ , return_tensors="""pt""" , truncation=snake_case_ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
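# fall back to a default x-vector speaker embedding (CMU ARCTIC xvectors, index 7305)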
_lowerCAmelCase : List[str] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
_lowerCAmelCase : Any = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.model.generate_speech(**snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.post_processor(snake_case_ ).cpu().detach()
| 309
| 1
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 309
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
_lowerCAmelCase : int = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("""RGB""" )
return image
def _UpperCAmelCase ( _lowerCamelCase : Any ) -> Dict:
_lowerCAmelCase : str = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] ) -> Optional[Any]:
_lowerCAmelCase : str = dct.pop(_lowerCamelCase )
_lowerCAmelCase : str = val
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ) -> Tuple:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_lowerCAmelCase : Tuple = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
_lowerCAmelCase : Optional[Any] = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
_lowerCAmelCase : int = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
_lowerCAmelCase : str = qkv_bias
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] ) -> List[Any]:
_lowerCAmelCase : str = 3_64 if """coco""" in model_name else 2_24
_lowerCAmelCase : str = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_lowerCAmelCase : int = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
_lowerCAmelCase : Union[str, Any] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
_lowerCAmelCase : Optional[int] = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_lowerCAmelCase : str = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
_lowerCAmelCase : Dict = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any]=None , _lowerCamelCase : int=False ) -> List[str]:
_lowerCAmelCase : int = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_lowerCAmelCase : List[Any] = tokenizer("""\n""" , add_special_tokens=_lowerCamelCase ).input_ids[0]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
_lowerCAmelCase : Union[str, Any] = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_lowerCAmelCase , _lowerCAmelCase : List[str] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_lowerCAmelCase : Dict = """cuda""" if torch.cuda.is_available() else """cpu"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
print("""Looks ok!""" )
print("""Generating a caption...""" )
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
_lowerCAmelCase : List[Any] = original_model.generate({"""image""": original_pixel_values} )
_lowerCAmelCase : Dict = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , _lowerCamelCase )
_lowerCAmelCase : int = input_ids.shape[1]
_lowerCAmelCase : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
_lowerCAmelCase : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
processor.push_to_hub(f'nielsr/{model_name}' )
hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
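# A minimal invocation sketch for the script above (assuming it is saved as
# convert_blip_2_original_to_pytorch.py and that LAVIS is installed so the original
# weights can be loaded; the output directory is illustrative):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b-converted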
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
    # ===== assertions =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
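# For context, a minimal sketch of the server side exercised above, reconstructed from the
# calls the test asserts (bind/listen/accept, one recv, chunked reads/sends, close/shutdown).
# This is an illustration, not the real `file_transfer.send_file` module; the port and the
# 1024-byte chunk size are assumptions.
import socket as _socket


def _send_file_sketch(filename: str = "mytext.txt") -> None:
    sock = _socket.socket()
    sock.bind((_socket.gethostname(), 12312))  # arbitrary non-privileged port (assumption)
    sock.listen(5)
    conn, _ = sock.accept()
    conn.recv(1024)  # wait for the client's request
    with open(filename, "rb") as in_file:
        data = in_file.read(1024)
        while data:  # stream the file in 1024-byte chunks
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    sock.shutdown(1)
    sock.close()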
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase_ = """src/diffusers"""
# Pattern that looks at the indentation in a line.
UpperCamelCase_ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase_ = re.compile(r"""\[([^\]]+)\]""")
def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str]="" , _lowerCamelCase : str=None , _lowerCamelCase : List[Any]=None ) -> str:
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase ):
index += 1
_lowerCAmelCase : List[Any] = ["""\n""".join(lines[:index] )]
else:
_lowerCAmelCase : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(_lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCamelCase ) )
if index < len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Dict = []
else:
blocks.append("""\n""".join(_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase ) > 0:
blocks.append("""\n""".join(_lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wraps a key function so that case and underscores are ignored when sorting."""
    def _inner(x):
        return key(x).lower().replace("_", "")
    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following isort rules: constants, then classes, then functions."""
    # If no key is provided, we use a noop.
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    sort_key = ignore_underscore(key)
    return sorted(constants, key=sort_key) + sorted(classes, key=sort_key) + sorted(functions, key=sort_key)
def sort_objects_in_import(import_statement: str) -> str:
    """Sorts the imports inside a single import statement."""
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        return _re_bracket_content.sub(_replace, import_statement)
def sort_imports(file, check_only=True):
    """Sorts the imports defined in the `_import_structure` of a given init `file`."""
    with open(file, "r") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:")
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Checks (and optionally fixes) every __init__.py under the source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
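# Typical invocations (assuming the script lives at utils/custom_init_isort.py):
#
#   python utils/custom_init_isort.py               # fix badly sorted inits in place
#   python utils/custom_init_isort.py --check_only  # only report; raises if a file would change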
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
UpperCamelCase_ = TypeVar("""T""")
UpperCamelCase_ = Union[List[T], Tuple[T, ...]]
UpperCamelCase_ = Union[T, List[T], Dict[str, T]]
UpperCamelCase_ = Union[str, bytes, os.PathLike]
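# A small illustration of how these aliases read in a signature. The function is
# hypothetical, not part of the original module:
def head(items: ListLike[T]) -> T:
    """Return the first element of a list or a tuple."""
    return items[0]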
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file')
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer")
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer")
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length,
            padding=padding, truncation=truncation, **kwargs)
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding,
            max_length=max_target_length, truncation=truncation, **kwargs)
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
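# A minimal usage sketch (the checkpoint name is illustrative; any RAG checkpoint that ships
# the two tokenizer subfolders would do):
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   inputs = tokenizer(["who holds the record in 100m freestyle"], return_tensors="pt")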
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""ChineseCLIPFeatureExtractor"""]
UpperCamelCase_ = ["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
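# With the `_LazyModule` indirection above, importing the package only records the
# `_import_structure` mapping; the heavy submodules load on first attribute access, e.g.
# (assuming transformers is installed):
#
#   from transformers.models.chinese_clip import ChineseCLIPProcessor  # triggers the lazy load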
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort the list `a` in place using pigeonhole sort (integers only)."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))
if __name__ == "__main__":
main()
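# Expected output of the demo above:
#
#   Sorted order is: 2 3 4 6 7 8 8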
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Difference between the square of the sum (equal to the sum of cubes by Nicomachus'
    theorem) and the sum of the squares of the first `n` natural numbers."""
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'{solution() = }')
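    # A brute-force cross-check of the closed form above (illustrative):
    assert solution(10) == sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11))  # both equal 2640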
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
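# A minimal sketch of how this ONNX config is typically exercised (treat it as an
# illustration; using a default WhisperConfig is an assumption, any Whisper config works):
#
#   config = WhisperConfig()
#   onnx_config = WhisperOnnxConfig(config)
#   print(onnx_config.inputs)               # OrderedDict with "input_features", "decoder_input_ids", ...
#   print(onnx_config.atol_for_validation)  # 1e-3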
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
UpperCamelCase_ = """src/transformers"""
# Matches is_xxx_available()
UpperCamelCase_ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCamelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCamelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCamelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCamelCase_ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCamelCase_ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCamelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCamelCase_ = re.compile(r"""^\s*try:""")
# Catches a line with else:
UpperCamelCase_ = re.compile(r"""^\s*else:""")
def find_backend(line):
    """Find one (or multiple) backend in a code line of an init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects and the
    `TYPE_CHECKING` objects it defines.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and list their differences."""
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}')
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f'{key} backend'
            errors.append(f'Differences for {name}:')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f'  {a} in TYPE_HINT but not in _import_structure.')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f'  {a} in _import_structure but not in TYPE_HINT.')
    return errors
def check_all_inits():
    """Run `parse_init` and `analyze_results` on every init file in the repo."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f'- {module}' for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f'{list_of_modules}\n'
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
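# Running this file directly executes both checks, which is how CI typically invokes it:
#
#   python utils/check_inits.py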
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCamelCase_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""DPTFeatureExtractor"""]
UpperCamelCase_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
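# Example (doctest style; 25 = 0b11001 and 32 = 0b100000, so their OR is 0b111001):
#
#   >>> binary_or(25, 32)
#   '0b111001'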
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    """Apply the ReLU activation element-wise: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
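# Migration example: the deprecated class and its replacement are drop-in compatible here,
# so new code should simply use the image processor (the checkpoint name is illustrative):
#
#   from transformers import DonutImageProcessor
#   image_processor = DonutImageProcessor.from_pretrained("naver-clova-ix/donut-base")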
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ (_a ):
__lowerCAmelCase : int = ["""image_processor""", """tokenizer"""]
__lowerCAmelCase : str = """CLIPImageProcessor"""
__lowerCAmelCase : Tuple = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self , snake_case_=None , snake_case_=None , **snake_case_ ):
_lowerCAmelCase : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , snake_case_ , )
_lowerCAmelCase : Dict = kwargs.pop("""feature_extractor""" )
_lowerCAmelCase : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(snake_case_ , snake_case_ )
def __call__( self , snake_case_=None , snake_case_=None , snake_case_=None , **snake_case_ ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_lowerCAmelCase : int = self.tokenizer(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if images is not None:
_lowerCAmelCase : List[str] = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if text is not None and images is not None:
_lowerCAmelCase : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case_ ) , tensor_type=snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = self.tokenizer.model_input_names
_lowerCAmelCase : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
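# Usage sketch (illustrative; `processor` and `pil_image` are assumed names):
# text and images may be passed together, in which case the returned encoding
# carries both the token ids and the merged pixel values.
#     encoding = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
#     encoding["input_ids"], encoding["pixel_values"]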
| 309
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class a_ (_a ):
__lowerCAmelCase : Dict = (DPMSolverSDEScheduler,)
__lowerCAmelCase : Dict = 1_0
def __UpperCamelCase ( self , **snake_case_ ):
_lowerCAmelCase : List[Any] = {
"""num_train_timesteps""": 1_1_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**snake_case_ )
return config
def __UpperCamelCase ( self ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def __UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case_ , beta_end=snake_case_ )
def __UpperCamelCase ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case_ )
def __UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : Any = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[Any] = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Union[str, Any] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Union[str, Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = output.prev_sample
_lowerCAmelCase : List[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.scheduler_classes[0]
_lowerCAmelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase : Dict = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : int = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : int = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : List[Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : str = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : int = output.prev_sample
_lowerCAmelCase : str = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Optional[int] = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : str = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case_ )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : Optional[int] = self.dummy_sample_deter.to(snake_case_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_lowerCAmelCase : str = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = output.prev_sample
_lowerCAmelCase : List[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.scheduler_classes[0]
_lowerCAmelCase : Optional[int] = self.get_scheduler_config()
_lowerCAmelCase : Tuple = scheduler_class(**snake_case_ , use_karras_sigmas=snake_case_ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case_ )
_lowerCAmelCase : List[Any] = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter.to(snake_case_ ) * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[int] = sample.to(snake_case_ )
for t in scheduler.timesteps:
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : int = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : str = output.prev_sample
_lowerCAmelCase : Optional[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
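    # Denoising-loop pattern distilled from the tests above (a sketch, not an
    # extra test case):
    #     scheduler.set_timesteps(num_steps)
    #     sample = noise * scheduler.init_noise_sigma
    #     for t in scheduler.timesteps:
    #         model_input = scheduler.scale_model_input(sample, t)
    #         noise_pred = model(model_input, t)
    #         sample = scheduler.step(noise_pred, t, sample).prev_sample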
| 309
| 1
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _UpperCAmelCase ( _lowerCamelCase : list[list[float]] ) -> list[list[float]]:
_lowerCAmelCase : Optional[Any] = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_lowerCamelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
_lowerCAmelCase : Optional[int] = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creates a copy of the matrix with swapped positions of the elements
_lowerCAmelCase : List[str] = [[0.0, 0.0], [0.0, 0.0]]
_lowerCAmelCase , _lowerCAmelCase : List[Any] = matrix[1][1], matrix[0][0]
_lowerCAmelCase , _lowerCAmelCase : str = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_lowerCamelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
_lowerCAmelCase : Dict = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
_lowerCAmelCase : Optional[int] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
_lowerCAmelCase : Dict = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
_lowerCAmelCase : Union[str, Any] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
_lowerCAmelCase : List[Any] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
_lowerCAmelCase : Optional[int] = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
_lowerCAmelCase : Optional[Any] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
_lowerCAmelCase : List[Any] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
_lowerCAmelCase : str = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
_lowerCAmelCase : str = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
_lowerCAmelCase : str = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
_lowerCAmelCase : List[Any] = array(_lowerCamelCase )
for i in range(3 ):
for j in range(3 ):
_lowerCAmelCase : Optional[Any] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
_lowerCAmelCase : List[str] = array(_lowerCamelCase )
for i in range(3 ):
for j in range(3 ):
            inverse_matrix[i][j] /= d(determinant )  # scale the adjoint by 1/determinant
# Calculate the inverse of the matrix
    return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
| 309
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """vocab.txt"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class a_ (_a ):
__lowerCAmelCase : Any = VOCAB_FILES_NAMES
__lowerCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[int] = ConvBertTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ):
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
_lowerCAmelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
_lowerCAmelCase : List[str] = do_lower_case
_lowerCAmelCase : str = strip_accents
_lowerCAmelCase : List[Any] = tokenize_chinese_chars
_lowerCAmelCase : List[Any] = normalizer_class(**snake_case_ )
_lowerCAmelCase : str = do_lower_case
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Optional[Any] = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Any = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
| 309
| 1
|
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a_ :
def __init__( self ):
_lowerCAmelCase : Any = """"""
_lowerCAmelCase : List[Any] = """"""
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : int = 0
_lowerCAmelCase : str = 2_5_6
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Dict = 0
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : str = cva.imread(snake_case_ , 0 )
_lowerCAmelCase : List[str] = copy.deepcopy(self.img )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="""x""" )
_lowerCAmelCase : List[Any] = np.sum(snake_case_ )
for i in range(len(snake_case_ ) ):
_lowerCAmelCase : Optional[int] = x[i] / self.k
self.sk += prk
_lowerCAmelCase : Any = (self.L - 1) * self.sk
if self.rem != 0:
                _lowerCAmelCase : Dict = last % 1  # fractional part; the original int(last % last) is always zero
_lowerCAmelCase : str = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(snake_case_ )
_lowerCAmelCase : str = int(np.ma.count(self.img ) / self.img[1].size )
_lowerCAmelCase : Union[str, Any] = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
_lowerCAmelCase : Union[str, Any] = self.img[j][i]
if num != self.last_list[num]:
_lowerCAmelCase : List[str] = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def __UpperCamelCase ( self ):
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def __UpperCamelCase ( self ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
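# Note: despite the "stretch" name, the mapping built in the first method is
# classic histogram equalization: each grey level is remapped through the
# scaled cumulative distribution accumulated in self.sk.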
if __name__ == "__main__":
    UpperCamelCase_ = os.path.join(os.path.dirname(__file__), """image_data/input.jpg""")
UpperCamelCase_ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase_ = {
"""configuration_bridgetower""": [
"""BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BridgeTowerConfig""",
"""BridgeTowerTextConfig""",
"""BridgeTowerVisionConfig""",
],
"""processing_bridgetower""": ["""BridgeTowerProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""BridgeTowerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BridgeTowerForContrastiveLearning""",
"""BridgeTowerForImageAndTextRetrieval""",
"""BridgeTowerForMaskedLM""",
"""BridgeTowerModel""",
"""BridgeTowerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
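# Pattern note: _LazyModule defers the heavy torch/vision imports declared in
# the import structure above until an attribute is first accessed, so
# importing only BridgeTowerConfig does not pull in the modeling code.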
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class a_ :
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None ):
_lowerCAmelCase : int = start
_lowerCAmelCase : Tuple = end
_lowerCAmelCase : Dict = val
_lowerCAmelCase : Tuple = (start + end) // 2
_lowerCAmelCase : Dict = left
_lowerCAmelCase : Optional[Any] = right
def __repr__( self ):
return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[int] = collection
_lowerCAmelCase : Optional[Any] = function
if self.collection:
_lowerCAmelCase : int = self._build_tree(0 , len(snake_case_ ) - 1 )
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
self._update_tree(self.root , snake_case_ , snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
return self._query_range(self.root , snake_case_ , snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
if start == end:
return SegmentTreeNode(snake_case_ , snake_case_ , self.collection[start] )
_lowerCAmelCase : List[str] = (start + end) // 2
_lowerCAmelCase : Optional[Any] = self._build_tree(snake_case_ , snake_case_ )
_lowerCAmelCase : List[str] = self._build_tree(mid + 1 , snake_case_ )
return SegmentTreeNode(snake_case_ , snake_case_ , self.fn(left.val , right.val ) , snake_case_ , snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
if node.start == i and node.end == i:
_lowerCAmelCase : Tuple = val
return
if i <= node.mid:
self._update_tree(node.left , snake_case_ , snake_case_ )
else:
self._update_tree(node.right , snake_case_ , snake_case_ )
_lowerCAmelCase : int = self.fn(node.left.val , node.right.val )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , snake_case_ , snake_case_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , snake_case_ , node.mid ) , self._query_range(node.right , node.mid + 1 , snake_case_ ) , )
else:
# range in right child tree
return self._query_range(node.right , snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
if self.root is not None:
_lowerCAmelCase : Optional[Any] = Queue()
queue.put(self.root )
while not queue.empty():
_lowerCAmelCase : Optional[Any] = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
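# Complexity note: update and query_range each walk a single root-to-leaf path
# of the tree built above, so both run in O(log n) for n elements; the initial
# build is O(n).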
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
UpperCamelCase_ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 309
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
UpperCamelCase_ = 0
UpperCamelCase_ = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0's mark free cells, 1's mark obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCamelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
UpperCamelCase_ = tuple[int, int]
class a_ :
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_lowerCAmelCase : Optional[int] = pos_x
_lowerCAmelCase : List[str] = pos_y
_lowerCAmelCase : Tuple = (pos_y, pos_x)
_lowerCAmelCase : List[Any] = goal_x
_lowerCAmelCase : int = goal_y
_lowerCAmelCase : Union[str, Any] = g_cost
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : List[Any] = self.calculate_heuristic()
_lowerCAmelCase : Optional[int] = self.g_cost + self.h_cost
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.pos_x - self.goal_x
_lowerCAmelCase : Optional[int] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(snake_case_ ) + abs(snake_case_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , snake_case_ ):
return self.f_cost < other.f_cost
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , snake_case_ )
_lowerCAmelCase : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , snake_case_ )
_lowerCAmelCase : List[str] = [self.start]
_lowerCAmelCase : list[Node] = []
_lowerCAmelCase : List[str] = False
def __UpperCamelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(snake_case_ )
self.closed_nodes.append(snake_case_ )
_lowerCAmelCase : Optional[int] = self.get_successors(snake_case_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(snake_case_ )
else:
# retrieve the best current path
_lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(snake_case_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(snake_case_ )
else:
self.open_nodes.append(snake_case_ )
return [self.start.pos]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Union[str, Any] = []
for action in delta:
_lowerCAmelCase : Union[str, Any] = parent.pos_x + action[1]
_lowerCAmelCase : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(snake_case_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
snake_case_ , snake_case_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , snake_case_ , ) )
return successors
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[Any] = node
_lowerCAmelCase : Optional[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCAmelCase : Optional[int] = current_node.parent
path.reverse()
return path
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = AStar(snake_case_ , snake_case_ )
_lowerCAmelCase : int = AStar(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = False
def __UpperCamelCase ( self ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
_lowerCAmelCase : Tuple = self.fwd_astar.open_nodes.pop(0 )
_lowerCAmelCase : Optional[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
snake_case_ , snake_case_ )
self.fwd_astar.closed_nodes.append(snake_case_ )
self.bwd_astar.closed_nodes.append(snake_case_ )
_lowerCAmelCase : List[str] = current_bwd_node
_lowerCAmelCase : Dict = current_fwd_node
_lowerCAmelCase : Any = {
self.fwd_astar: self.fwd_astar.get_successors(snake_case_ ),
self.bwd_astar: self.bwd_astar.get_successors(snake_case_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(snake_case_ )
else:
# retrieve the best current path
_lowerCAmelCase : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(snake_case_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(snake_case_ )
else:
astar.open_nodes.append(snake_case_ )
return [self.fwd_astar.start.pos]
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : int = self.fwd_astar.retrace_path(snake_case_ )
_lowerCAmelCase : Optional[Any] = self.bwd_astar.retrace_path(snake_case_ )
bwd_path.pop()
bwd_path.reverse()
_lowerCAmelCase : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
UpperCamelCase_ = (0, 0)
UpperCamelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCamelCase_ = time.time()
UpperCamelCase_ = AStar(init, goal)
UpperCamelCase_ = a_star.search()
UpperCamelCase_ = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
UpperCamelCase_ = time.time()
    UpperCamelCase_ = BidirectionalAStar(init, goal)
    UpperCamelCase_ = bidir_astar.search()  # run the search so the timing below actually measures it
    UpperCamelCase_ = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
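    # Heuristic note: HEURISTIC == 0 (the setting above) selects the Euclidean
    # distance sqrt(dx**2 + dy**2); HEURISTIC = 1 switches to Manhattan
    # distance abs(dx) + abs(dy), which is admissible for the 4-connected
    # moves listed in `delta`.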
| 309
| 1
|
'''simple docstring'''
import math
def _UpperCAmelCase ( _lowerCamelCase : int ) -> list[int]:
_lowerCAmelCase : Dict = []
_lowerCAmelCase : int = 2
_lowerCAmelCase : Dict = int(math.sqrt(_lowerCamelCase ) ) # Size of every segment
_lowerCAmelCase : Union[str, Any] = [True] * (end + 1)
_lowerCAmelCase : Optional[Any] = []
while start <= end:
if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start , end + 1 , start ):
                temp[i] = False
start += 1
prime += in_prime
_lowerCAmelCase : Optional[Any] = end + 1
_lowerCAmelCase : str = min(2 * end , _lowerCamelCase )
    while low <= _lowerCamelCase:
_lowerCAmelCase : Any = [True] * (high - low + 1)
for each in in_prime:
_lowerCAmelCase : Optional[Any] = math.floor(low / each ) * each
if t < low:
t += each
            for j in range(t , high + 1 , each ):
                temp[j - low] = False
for j in range(len(_lowerCamelCase ) ):
if temp[j] is True:
prime.append(j + low )
_lowerCAmelCase : List[Any] = high + 1
_lowerCAmelCase : Optional[Any] = min(high + end , _lowerCamelCase )
return prime
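# Illustrative check (assuming the loop-body fixes above): sieve(30) yields
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].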
if __name__ == "__main__":
    print(sieve(10**6))
| 309
|
'''simple docstring'''
def _UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : str ) -> list[int]:
_lowerCAmelCase : List[Any] = int(_lowerCamelCase )
# Initialize Result
_lowerCAmelCase : Any = []
# Traverse through all denomination
for denomination in reversed(_lowerCamelCase ):
# Find denominations
while int(_lowerCamelCase ) >= int(_lowerCamelCase ):
total_value -= int(_lowerCamelCase )
answer.append(_lowerCamelCase ) # Append the "answers" array
return answer
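# Worked example (illustrative): with denominations
# [1, 2, 5, 10, 20, 50, 100, 500, 2000] and a value of 987, the greedy pass
# above yields [500, 100, 100, 100, 100, 50, 20, 10, 5, 2].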
# Driver Code
if __name__ == "__main__":
UpperCamelCase_ = []
UpperCamelCase_ = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
UpperCamelCase_ = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
UpperCamelCase_ = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCamelCase_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCamelCase_ = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'Following is minimal change for {value}: ')
UpperCamelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 309
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCamelCase_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""DPTFeatureExtractor"""]
UpperCamelCase_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
'''simple docstring'''
def _UpperCAmelCase ( _lowerCamelCase : int = 10_00 ) -> int:
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
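# Context (an inference, not stated in the file): this matches Project Euler
# problem 120, where the maximum remainder of (a - 1)**n + (a + 1)**n mod a**2
# equals 2 * a * ((a - 1) // 2), summed here over 3 <= a <= n.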
if __name__ == "__main__":
print(solution())
| 309
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = """laion/clap-htsat-unfused"""
_lowerCAmelCase : int = tempfile.mkdtemp()
def __UpperCamelCase ( self , **snake_case_ ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self , **snake_case_ ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = self.get_feature_extractor()
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : int = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
_lowerCAmelCase : Dict = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = floats_list((3, 1_0_0_0) )
_lowerCAmelCase : List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
_lowerCAmelCase : Optional[Any] = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Tuple = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = """This is a test string"""
_lowerCAmelCase : Union[str, Any] = processor(text=snake_case_ )
_lowerCAmelCase : Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = self.get_feature_extractor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[Any] = processor.batch_decode(snake_case_ )
_lowerCAmelCase : Dict = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.get_feature_extractor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 309
| 1
|
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : complex , _lowerCamelCase : str = "x" , _lowerCamelCase : float = 10**-10 , _lowerCamelCase : int = 1 , ) -> complex:
_lowerCAmelCase : List[str] = symbols(_lowerCamelCase )
_lowerCAmelCase : List[Any] = lambdify(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[Any] = lambdify(_lowerCamelCase , diff(_lowerCamelCase , _lowerCamelCase ) )
_lowerCAmelCase : Tuple = starting_point
while True:
if diff_function(_lowerCamelCase ) != 0:
_lowerCAmelCase : str = prev_guess - multiplicity * func(_lowerCamelCase ) / diff_function(
_lowerCamelCase )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_lowerCAmelCase : int = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F'{newton_raphson("exp(x) - 1", 10, precision=0.0_0_5)}',
)
# Find root of cos(x)
print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 309
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
UpperCamelCase_ = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
UpperCamelCase_ = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class a_ (_a ):
__lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = ["""input_ids""", """attention_mask"""]
__lowerCAmelCase : List[int] = []
__lowerCAmelCase : List[int] = []
def __init__( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<pad>" , snake_case_="<unk>" , snake_case_="m2m100" , snake_case_ = None , snake_case_=8 , **snake_case_ , ):
_lowerCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase : Optional[Any] = language_codes
_lowerCAmelCase : Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes]
_lowerCAmelCase : str = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
_lowerCAmelCase : int = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(snake_case_ )
for lang_code in fairseq_language_code
if self.get_lang_token(snake_case_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case_ , tgt_lang=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , language_codes=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Optional[int] = vocab_file
_lowerCAmelCase : Any = load_json(snake_case_ )
_lowerCAmelCase : str = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Union[str, Any] = spm_file
_lowerCAmelCase : Tuple = load_spm(snake_case_ , self.sp_model_kwargs )
_lowerCAmelCase : int = len(self.encoder )
_lowerCAmelCase : Union[str, Any] = {
self.get_lang_token(snake_case_ ): self.encoder_size + i for i, lang_code in enumerate(snake_case_ )
}
_lowerCAmelCase : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case_ )}
_lowerCAmelCase : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()}
_lowerCAmelCase : Any = src_lang if src_lang is not None else """en"""
_lowerCAmelCase : Optional[int] = tgt_lang
_lowerCAmelCase : Tuple = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_lowerCAmelCase : List[Any] = num_madeup_words
@property
def __UpperCamelCase ( self ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __UpperCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case_ , self.encoder[self.unk_token] )
def __UpperCamelCase ( self , snake_case_ ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case_ , self.unk_token )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_ ) + token
_lowerCAmelCase : Optional[Any] = []
else:
current_sub_tokens.append(snake_case_ )
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
_lowerCAmelCase : List[Any] = [1] * len(self.prefix_tokens )
_lowerCAmelCase : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCAmelCase : int = self.__dict__.copy()
_lowerCAmelCase : str = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : str = {}
_lowerCAmelCase : str = load_spm(self.spm_file , self.sp_model_kwargs )
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Dict = Path(snake_case_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , snake_case_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case_ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (str(snake_case_ ), str(snake_case_ ))
def __UpperCamelCase ( self , snake_case_ , snake_case_ = "en" , snake_case_ = None , snake_case_ = "ro" , **snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = src_lang
_lowerCAmelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase : Dict = src_lang
_lowerCAmelCase : str = self(snake_case_ , add_special_tokens=snake_case_ , **snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.get_lang_id(snake_case_ )
_lowerCAmelCase : Tuple = tgt_lang_id
return inputs
def __UpperCamelCase ( self ):
self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[Any] = self.get_lang_token(snake_case_ )
_lowerCAmelCase : List[Any] = self.lang_token_to_id[lang_token]
_lowerCAmelCase : Any = [self.cur_lang_id]
_lowerCAmelCase : Any = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = self.get_lang_token(snake_case_ )
_lowerCAmelCase : int = self.lang_token_to_id[lang_token]
_lowerCAmelCase : str = [self.cur_lang_id]
_lowerCAmelCase : str = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
return self.lang_code_to_token[lang]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[str] = self.get_lang_token(snake_case_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
_lowerCAmelCase : Optional[Any] = sentencepiece.SentencePieceProcessor(**_lowerCamelCase )
spm.Load(str(_lowerCamelCase ) )
return spm
def _UpperCAmelCase ( _lowerCamelCase : str ) -> Union[Dict, List]:
with open(_lowerCamelCase , """r""" ) as f:
return json.load(_lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : str ) -> None:
with open(_lowerCamelCase , """w""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase , indent=2 )
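# Usage sketch (illustrative; the checkpoint name is an assumption): the
# public tokenizer built on this logic is typically driven as
#     tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     batch = tok("Hello world", return_tensors="pt")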
| 309
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ) -> np.ndarray:
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
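# Usage sketch (illustrative): integrating y' = y with y(0) = 1 up to x = 5.
# Explicit Euler underestimates the exact value exp(5) ~ 148.41 at this step size.
#
#     y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 5.0)
#     print(y[-1])  # ~144.8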
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
UpperCamelCase_ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
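# Quick usage sketch (dataset name illustrative; downloads on first call):
#
#     from datasets import load_dataset
#     ds = load_dataset("glue", "mrpc", split="train")
#     print(ds[0])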
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def test_send_file_running_as_expected( file , sock ):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f )
    # ===== invoke =====
    send_file(filename="""mytext.txt""" , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = """MobileNetV1Config"""

# Base docstring
_CHECKPOINT_FOR_DOC = """google/mobilenet_v1_1.0_224"""
_EXPECTED_OUTPUT_SHAPE = [1, 10_24, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = """google/mobilenet_v1_1.0_224"""
_IMAGE_CLASS_EXPECTED_OUTPUT = """tabby, tabby cat"""

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    """google/mobilenet_v1_1.0_224""",
    """google/mobilenet_v1_0.75_192""",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map( model , config , tf_weights=None ):
    tf_to_pt_map = {}
    if isinstance(model , MobileNetVaForImageClassification ):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = """MobilenetV1/Conv2d_0/"""
    tf_to_pt_map[prefix + """weights"""] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + """BatchNorm/beta"""] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + """BatchNorm/gamma"""] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + """BatchNorm/moving_mean"""] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + """BatchNorm/moving_variance"""] = backbone.conv_stem.normalization.running_var
    for i in range(13 ):
        tf_index = i + 1
        # each block is a depthwise conv followed by a pointwise conv, stored as
        # two consecutive entries of `backbone.layer`
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = f'MobilenetV1/Conv2d_{tf_index}_depthwise/'
        tf_to_pt_map[prefix + """depthwise_weights"""] = pointer.convolution.weight
        tf_to_pt_map[prefix + """BatchNorm/beta"""] = pointer.normalization.bias
        tf_to_pt_map[prefix + """BatchNorm/gamma"""] = pointer.normalization.weight
        tf_to_pt_map[prefix + """BatchNorm/moving_mean"""] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + """BatchNorm/moving_variance"""] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = f'MobilenetV1/Conv2d_{tf_index}_pointwise/'
        tf_to_pt_map[prefix + """weights"""] = pointer.convolution.weight
        tf_to_pt_map[prefix + """BatchNorm/beta"""] = pointer.normalization.bias
        tf_to_pt_map[prefix + """BatchNorm/gamma"""] = pointer.normalization.weight
        tf_to_pt_map[prefix + """BatchNorm/moving_mean"""] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + """BatchNorm/moving_variance"""] = pointer.normalization.running_var
    if isinstance(model , MobileNetVaForImageClassification ):
        prefix = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
        tf_to_pt_map[prefix + """weights"""] = model.classifier.weight
        tf_to_pt_map[prefix + """biases"""] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va( model , config , tf_checkpoint_path ):
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            """Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
            """https://www.tensorflow.org/install/ for installation instructions.""" )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path )
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f'Loading TF weight {name} with shape {shape}' )
        array = tf.train.load_variable(tf_checkpoint_path , name )
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model , config , tf_weights )
    for name, pointer in tf_to_pt_map.items():
        logger.info(f'Importing {name}' )
        if name not in tf_weights:
            logger.info(f'{name} not in tf pre-trained weights, skipping' )
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info("""Transposing depthwise""" )
            array = np.transpose(array , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info("""Transposing""" )
            if len(pointer.shape ) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' )
        logger.info(f'Initialize PyTorch weight {name} {array.shape}' )
        pointer.data = torch.from_numpy(array )
        tf_weights.pop(name , None )
        tf_weights.pop(name + """/RMSProp""" , None )
        tf_weights.pop(name + """/RMSProp_1""" , None )
        tf_weights.pop(name + """/ExponentialMovingAverage""" , None )
    logger.info(f'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' )
    return model
def apply_tf_padding( features : torch.Tensor , conv_layer : nn.Convad ) -> torch.Tensor:
    in_height , in_width = features.shape[-2:]
    stride_height , stride_width = conv_layer.stride
    kernel_height , kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , """constant""" , 0.0 )
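# Minimal sketch of the "SAME"-padding emulation above (shapes assumed; `nn.Convad`
# in this file's naming corresponds to torch's `nn.Conv2d`): a 3x3, stride-2
# convolution on a 224x224 input gets one extra pixel of padding on the bottom and right.
#
#     conv = nn.Conv2d(3, 32, kernel_size=3, stride=2)
#     x = torch.randn(1, 3, 224, 224)
#     apply_tf_padding(x, conv).shape  # torch.Size([1, 3, 225, 225])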
class MobileNetVaConvLayer (nn.Module ):
    def __init__( self , config , in_channels , out_channels , kernel_size , stride = 1 , groups = 1 , bias = False , use_normalization = True , use_activation = True , ):
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(f'Input channels ({in_channels}) are not divisible by {groups} groups.' )
        if out_channels % groups != 0:
            raise ValueError(f'Output channels ({out_channels}) are not divisible by {groups} groups.' )
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
        self.convolution = nn.Convad(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , stride=stride , padding=padding , groups=groups , bias=bias , padding_mode="""zeros""" , )
        if use_normalization:
            self.normalization = nn.BatchNormad(
                num_features=out_channels , eps=config.layer_norm_eps , momentum=0.9997 , affine=True , track_running_stats=True , )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation , str ):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act , str ):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward( self , features ):
        if self.config.tf_padding:
            features = apply_tf_padding(features , self.convolution )
        features = self.convolution(features )
        if self.normalization is not None:
            features = self.normalization(features )
        if self.activation is not None:
            features = self.activation(features )
        return features
class MobileNetVaPreTrainedModel (PreTrainedModel ):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = """mobilenet_v1"""
    main_input_name = """pixel_values"""
    supports_gradient_checkpointing = False

    def _init_weights( self , module ):
        if isinstance(module , (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.BatchNormad ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
UpperCamelCase_ = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UpperCamelCase_ = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaModel (MobileNetVaPreTrainedModel ):
    def __init__( self , config , add_pooling_layer = True ):
        super().__init__(config )
        self.config = config
        depth = 3_2
        out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
        self.conv_stem = MobileNetVaConvLayer(
            config , in_channels=config.num_channels , out_channels=out_channels , kernel_size=3 , stride=2 , )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(1_3 ):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
            # depthwise 3x3 conv (groups == channels), then pointwise 1x1 conv
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=in_channels , kernel_size=3 , stride=strides[i] , groups=in_channels , ) )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=out_channels , kernel_size=1 , ) )
        self.pooler = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads( self , heads_to_prune ):
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values = None , output_hidden_states = None , return_dict = None , ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("""You have to specify pixel_values""" )
        hidden_states = self.conv_stem(pixel_values )
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer ):
            hidden_states = layer_module(hidden_states )
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state ) , start_dim=1 )
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=all_hidden_states , )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """ , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaForImageClassification (MobileNetVaPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config )
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob , inplace=True )
        self.classifier = nn.Linear(last_hidden_size , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , output_hidden_states = None , labels = None , return_dict = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output ) )
        loss = None
        if labels is not None:
            # Infer the problem type from the label count and dtype when not set explicitly.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = """single_label_classification"""
                else:
                    self.config.problem_type = """multi_label_classification"""
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , )
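# Usage sketch (checkpoint taken from the docstrings above; weights download on first
# use, and `image` is assumed to be a PIL image):
#
#     from transformers import AutoImageProcessor
#     processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#     logits = model(**processor(images=image, return_tensors="pt")).logits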
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester :
    def __init__( self , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="""gelu""" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        return ViTMSNConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ViTMSNModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
        print(f'Labels: {labels}' )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp( self ):
        self.model_tester = ViTMSNModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMSNConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        pass

    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest (unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head( self ):
        torch.manual_seed(2 )
        model = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
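# The integration test above is marked `@slow`; in the transformers repo such tests
# only run when the RUN_SLOW environment variable is set, e.g. (path illustrative):
#
#     RUN_SLOW=1 python -m pytest tests/models/vit_msn -k integration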
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig :
    def __init__( self , config_file_or_dict ):
        if isinstance(config_file_or_dict , dict ):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict )
        elif os.path.exists(config_file_or_dict ):
            with io.open(config_file_or_dict , """r""" , encoding="""utf-8""" ) as f:
                config = json.load(f )
        else:
            try:
                config_decoded = baseaa.urlsafe_baadecode(config_file_or_dict ).decode("""utf-8""" )
                config = json.loads(config_decoded )
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f'Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}' )
        self.config = config
        self.set_stage_and_offload()
    def set_stage_and_offload( self ):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("""zero_optimization.stage""" , -1 )
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["""cpu""", """nvme"""] )
            offload_devices = set(
                [
                    self.get_value("""zero_optimization.offload_optimizer.device""" ),
                    self.get_value("""zero_optimization.offload_param.device""" ),
                ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                self._offload = True
    def find_config_node( self , ds_key_long ):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(""".""" )
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node )
            if config is None:
                return None, ds_key
        return config, ds_key
    def get_value( self , ds_key_long , default=None ):
        config , ds_key = self.find_config_node(ds_key_long )
        if config is None:
            return default
        return config.get(ds_key , default )
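    # Usage sketch (values illustrative): dotted keys walk nested dicts, returning
    # `default` when any segment is missing.
    #
    #     ds_config = HfDeepSpeedConfig({"zero_optimization": {"stage": 3}})
    #     ds_config.get_value("zero_optimization.stage")                 # -> 3
    #     ds_config.get_value("zero_optimization.offload_param.device")  # -> None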
    def del_config_sub_tree( self , ds_key_long , must_exist=False ):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(""".""" )
        for node in nodes:
            parent_config = config
            config = config.get(node )
            if config is None:
                if must_exist:
                    raise ValueError(f'Can\'t find {ds_key_long} entry in the config: {self.config}' )
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node )
    def is_true( self , ds_key_long ):
        value = self.get_value(ds_key_long )
        return False if value is None else bool(value )

    def is_false( self , ds_key_long ):
        value = self.get_value(ds_key_long )
        return False if value is None else not bool(value )

    def is_zero2( self ):
        return self._stage == 2

    def is_zero3( self ):
        return self._stage == 3

    def is_offload( self ):
        return self._offload
class DeepSpeedEngineWrapper :
    def __init__( self , engine ):
        self.engine = engine

    def backward( self , loss , **kwargs ):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss , **kwargs )
        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper (AcceleratedOptimizer ):
    def __init__( self , optimizer ):
        super().__init__(optimizer , device_placement=False , scaler=None )
        self.__has_overflow__ = hasattr(self.optimizer , """overflow""" )

    def zero_grad( self , set_to_none=None ):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step( self ):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def is_overflow( self ):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class DeepSpeedSchedulerWrapper (AcceleratedScheduler ):
    def __init__( self , scheduler , optimizers ):
        super().__init__(scheduler , optimizers )

    def step( self ):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class DummyOptim :
    def __init__( self , params , lr=0.001 , weight_decay=0 , **kwargs ):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs
class DummyScheduler :
    def __init__( self , optimizer , total_num_steps=None , warmup_num_steps=0 , **kwargs ):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool (PipelineTool ):
    default_checkpoint = """microsoft/speecht5_tts"""
    description = (
        """This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
        """text to read (in English) and returns a waveform object containing the sound."""
    )
    name = """text_reader"""
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan

    inputs = ["""text"""]
    outputs = ["""audio"""]
    def setup( self ):
        if self.post_processor is None:
            self.post_processor = """microsoft/speecht5_hifigan"""
        super().setup()
    def encode( self , text , speaker_embeddings=None ):
        inputs = self.pre_processor(text=text , return_tensors="""pt""" , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
            # Fall back to a fixed x-vector from the CMU Arctic dataset as the default voice.
            embeddings_dataset = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
            speaker_embeddings = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward( self , inputs ):
        with torch.no_grad():
            return self.model.generate_speech(**inputs )

    def decode( self , outputs ):
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
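# Usage sketch (hedged; relies on the agent tooling's `PipelineTool.__call__`,
# which runs setup/encode/forward/decode in sequence):
#
#     tool = TextToSpeechTool()
#     speech = tool("Hello, world")  # 1-D waveform tensor (16 kHz for this checkpoint)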