| code (string, lengths 86-54.5k) | code_codestyle (int64, 0-371) | style_context (string, lengths 87-49.2k) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
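The five columns above appear to describe paired code samples scored for style consistency. A minimal sketch of loading a table with this schema (the dataset id below is a placeholder, not taken from this dump):

from datasets import load_dataset

# Hypothetical repo id; substitute the dataset this dump was exported from.
ds = load_dataset("your-org/code-style-pairs", split="train")
row = ds[0]
print(row["label"], len(row["code"]), len(row["style_context"]))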
def equation(x: float) -> float:
    """The function whose root we search for: f(x) = 10 - x^2."""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """Approximate a root of `equation` in [a, b] by repeated interval halving.

    Requires equation(a) and equation(b) to have opposite signs.
    """
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps: keep the half where the sign changes
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
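    # Quick sanity check (an added sketch, not part of the original sample): with
    # f(x) = 10 - x*x the positive root is sqrt(10), and the 0.01 loop tolerance
    # above guarantees the estimate lands within 0.01 of it.
    import math

    assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01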
| 232 |
"""simple docstring"""
from __future__ import annotations
class _UpperCAmelCase :
def __init__( self : Tuple , _lowercase : str , _lowercase : str ):
__UpperCAmelCase , __UpperCAmelCase = text, pattern
__UpperCAmelCase , __UpperCAmelCase = len(_lowercase ), len(_lowercase )
def a ( self : Optional[int] , _lowercase : str ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def a ( self : int , _lowercase : int ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def a ( self : Optional[Any] ):
# searches pattern in text and returns index positions
__UpperCAmelCase = []
for i in range(self.textLen - self.patLen + 1 ):
__UpperCAmelCase = self.mismatch_in_text(_lowercase )
if mismatch_index == -1:
positions.append(_lowercase )
else:
__UpperCAmelCase = self.match_in_pattern(self.text[mismatch_index] )
__UpperCAmelCase = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
_lowercase : str = 'ABAABA'
_lowercase : Tuple = 'AB'
_lowercase : Dict = BoyerMooreSearch(text, pattern)
_lowercase : Any = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
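# Illustration (an added sketch, not part of the original sample): for text
# "ABAABA" and pattern "AB" the pattern occurs at indices 0 and 3, so the
# script prints [0, 3].
assert positions == [0, 3]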
| 332 | 0 |
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def camel_case_split(identifier):
    """Split a camel-cased name into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def get_frameworks_table():
    """Build a table of every model type with the frameworks (PyTorch/TensorFlow/Flax) that support it."""
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers objects (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure each model type is associated with a processing class.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)


def update_pipeline_and_auto_class_table(table):
    """Update the table mapping model classes to (pipeline tag, auto class), keeping existing entries."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table


def update_metadata(token, commit_sha):
    """Regenerate the frameworks and pipeline-tags tables and push them to the transformers-metadata dataset."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )


def check_pipeline_tags():
    """Check that every pipeline tag in the library is covered by PIPELINE_TAGS_AND_AUTO_MODELS."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
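# Illustration (an added sketch, not part of the script): the regex in
# `camel_case_split` breaks an identifier at lowercase->uppercase boundaries and
# before the last capital of an acronym run, so for example:
#   camel_case_split("TFBertModel") == ["TF", "Bert", "Model"]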
| 268 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case__ ( snake_case_ ):
_snake_case : UNetaDModel
_snake_case : KarrasVeScheduler
def __init__( self , lowerCamelCase , lowerCamelCase ):
super().__init__()
self.register_modules(unet=lowerCamelCase , scheduler=lowerCamelCase )
@torch.no_grad()
def __call__( self , lowerCamelCase = 1 , lowerCamelCase = 50 , lowerCamelCase = None , lowerCamelCase = "pil" , lowerCamelCase = True , **lowerCamelCase , ):
__a = self.unet.config.sample_size
__a = (batch_size, 3, img_size, img_size)
__a = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
__a = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
__a = self.scheduler.schedule[t]
__a = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
__a , __a = self.scheduler.add_noise_to_input(lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
__a = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
__a = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
__a = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
__a = self.scheduler.step_correct(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , step_output.prev_sample , step_output["derivative"] , )
__a = step_output.prev_sample
__a = (sample / 2 + 0.5).clamp(0 , 1 )
__a = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase )
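# Usage sketch (added for illustration; assumes trained KarrasVe-compatible UNet
# weights are available to build `unet`):
#   pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]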
| 268 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : Optional[Any] = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class __UpperCamelCase ( a__ ):
lowerCamelCase : Optional[int] ="""falcon"""
lowerCamelCase : Union[str, Any] =["""past_key_values"""]
def __init__( self , lowerCAmelCase__=6_5024 , lowerCAmelCase__=4544 , lowerCAmelCase__=32 , lowerCAmelCase__=71 , lowerCAmelCase__=1E-5 , lowerCAmelCase__=0.02 , lowerCAmelCase__=True , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=None , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=11 , lowerCAmelCase__=11 , **lowerCAmelCase__ , ) -> Any:
a : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
a : Tuple = kwargs.pop("n_embed" , lowerCAmelCase__ )
a : int = hidden_size if n_embed is None else n_embed
a : Dict = num_hidden_layers
a : Any = num_attention_heads
a : List[str] = layer_norm_epsilon
a : Any = initializer_range
a : Dict = use_cache
a : Optional[int] = hidden_dropout
a : Optional[int] = attention_dropout
a : Optional[int] = bos_token_id
a : List[Any] = eos_token_id
a : str = num_attention_heads if num_kv_heads is None else num_kv_heads
a : Union[str, Any] = alibi
a : Any = new_decoder_architecture
a : Dict = multi_query # Ignored when new_decoder_architecture is True
a : List[Any] = parallel_attn
a : List[str] = bias
super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __a ( self ) -> Optional[int]:
return self.hidden_size // self.num_attention_heads
@property
def __a ( self ) -> Dict:
return not self.alibi
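# Usage sketch (added for illustration, not part of the original file): with the
# falcon-7b style defaults above,
#   config = FalconConfig()
#   config.head_dim  # 4544 // 71 == 64
#   config.rotary    # True, because alibi defaults to False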
| 105 |
"""Hyperparameter search backend classes for the Trainer."""
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name

    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
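# Usage sketch (added for illustration, not part of the original file):
#   backend_cls = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend("optuna")]
#   backend_cls().ensure_available()  # raises RuntimeError if optuna is not installed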
| 22 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __UpperCamelCase :
@staticmethod
def __a ( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
pass
def _SCREAMING_SNAKE_CASE ( _lowercase : Dict ) ->Optional[int]:
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
a : List[Any] = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
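# Running sketch (added note; the file path is an assumption about where this test
# module lives in the transformers repo):
#   pytest tests/pipelines/test_pipelines_document_question_answering.py -k test_small_model_pt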
| 79 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
if isinstance(_lowercase , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 79 | 1 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics, including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    """CoVal coreference evaluation metric."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
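# Usage sketch (added for illustration; mirrors the doctest in _KWARGS_DESCRIPTION above):
#   coval = datasets.load_metric("coval")
#   results = coval.compute(predictions=predictions, references=references)
#   results["conll_score"]  # average of the MUC, B-cubed and CEAFe F1 values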
| 257 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility with `as_target_processor`
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
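# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumes the "facebook/s2t-small-librispeech-asr" checkpoint and a 16 kHz float
# waveform `speech`:
#
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = processor(audio=speech, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="a transcription", return_tensors="pt").input_ids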
| 257
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = 'time_series_transformer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
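# --- Usage sketch (added for illustration; the values below are arbitrary example
# choices, not recommended settings) ---
# A small config for forecasting 24 steps ahead from a 48-step context:
#
#   config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48, num_time_features=2)
#   print(config.d_model, config.feature_size)  # 64 and the derived input feature size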
| 364
|
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        # private field
        self.__key = key
    def encrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]
    def decrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]
    def encrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans
    def decrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans
    def encrypt_file(self, file: str, key: int = 0) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True
    def decrypt_file(self, file: str, key: int) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 57
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBartaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_054)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_054)
    def test_full_tokenizer(self):
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."], )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case : int = {"input_ids": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = snake_case  # the encoding dict defined above keeps its original variable name
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/mbart-large-50", revision="d3913889c59cd5c9e456b269c376325eabad57e2", )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname)
                # Save tokenizer rust, legacy_format=True
                tmpdirname = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname)
                # Save tokenizer rust, legacy_format=False
                tmpdirname = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartaaOneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = """facebook/mbart-large-50-one-to-many-mmt"""
    src_text = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO")
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250_001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250_004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250_020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250_038)
    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250_053, 250_001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR")
        self.assertEqual(
            nested_simplify(inputs), {
                # en_XX, A, test, EOS
                "input_ids": [[250_004, 62, 3_034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250_001,
            }, )
| 148
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1
    @register_to_config
    def __init__(self, num_train_timesteps=2_000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)
    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean
    def __len__(self):
        return self.config.num_train_timesteps
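# --- Usage sketch (added for illustration; not part of the original scheduler file) ---
# One reverse-SDE step with a dummy score in place of a trained model's output:
#
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(num_inference_steps=100)
#   x = torch.randn(1, 3, 8, 8)
#   score = torch.zeros_like(x)  # stand-in for model(x, t)
#   x, x_mean = scheduler.step_pred(score, x, scheduler.timesteps[0])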
| 148
| 1
|
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = DatasetDict(
        {
            'train': dataset['train'].select(train_idxs),
            'validation': dataset['train'].select(valid_idxs),
            'test': dataset['validation'],
        } )
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    test_dataloader = DataLoader(
        tokenized_datasets['test'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset('glue', 'mrpc')
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['train'].num_rows), datasets['train']['label'])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs, batch_size, )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F'epoch {epoch}:', eval_metric)
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print('Average test metrics from all folds:', test_metric)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    # New Code #
    parser.add_argument('--num_folds', type=int, default=3, help='The number of splits to perform across the dataset')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
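# Example invocation (added for illustration; "cross_validation.py" is an assumed file
# name -- use whatever this script is saved as):
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16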
| 225
|
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
class Direction(enum.Enum):
    UP = 0
    DOWN = 1
def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()
def writeColor(content, color, end=""):
    forceWrite(F'\u001b[{color}m{content}\u001b[0m', end)
def reset_cursor():
    forceWrite('\r')
def move_cursor(num_lines: int, direction: str):
    forceWrite(F'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}')
def clear_line():
    forceWrite(' ' * TERMINAL_WIDTH)
    reset_cursor()
def linebreak():
    reset_cursor()
    forceWrite('-' * TERMINAL_WIDTH)
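if __name__ == "__main__":
    # Tiny demo (added for illustration): draw a horizontal rule, then write a word in
    # ANSI green (color code 32) and move to a fresh line.
    linebreak()
    forceWrite("\n")
    writeColor("hello", 32, end="\n")
    reset_cursor()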
| 225
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim
        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)
        self.proj_in = nn.Linear(embedding_dim, inner_dim)
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(F'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}')
        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)
        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(F'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}')
        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))
        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                F'`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.')
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn="gelu", attention_bias=True, )
                for d in range(num_layers)
            ] )
        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(F'Unsupported norm_in_type: {norm_in_type}.')
        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0)
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)
        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}
        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[F'{name}.processor'] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F'{name}.{sub_name}', child, processors)
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                F'A dict of processors was passed, but the number of processors {len(processor)} does not match the'
                F' number of attention layers: {count}. Please make sure to pass {count} processor classes.')
        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(F'{name}.processor'))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F'{name}.{sub_name}', child, processor)
        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self, hidden_states, timestep: Union[torch.Tensor, float, int], proj_embedding: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.BoolTensor] = None, return_dict: bool = True, ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)
        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")
        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)
        hidden_states = torch.cat(
            additional_embeds, dim=1, )
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings, (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ), value=0.0, )
        hidden_states = hidden_states + positional_embeddings
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)
        hidden_states = self.norm_out(hidden_states)
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
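# --- Usage sketch (added for illustration; not part of the original model file) ---
# A tiny, randomly initialized prior -- the sizes below are arbitrary examples, not the
# values of any released checkpoint:
#
#   prior = PriorTransformer(num_attention_heads=2, attention_head_dim=8, num_layers=2,
#                            embedding_dim=16, num_embeddings=4, additional_embeddings=4)
#   out = prior(hidden_states=torch.randn(1, 16), timestep=1,
#               proj_embedding=torch.randn(1, 16), encoder_hidden_states=torch.randn(1, 4, 16))
#   print(out.predicted_image_embedding.shape)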
| 85
|
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
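if __name__ == "__main__":
    # Worked example (added for illustration): min-max scaling maps the smallest value
    # to 0 and the largest to 1; everything else lands proportionally in between.
    print(normalization([39, 30, 45, 21, 57]))  # [0.5, 0.25, 0.667, 0.0, 1.0]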
| 85
| 1
|
'''simple docstring'''
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(F'The following runners are offline:\n{failed}')
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
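# Example invocation (added for illustration; the script name and token are placeholders):
#   python check_offline_runners.py --target_runners single-gpu-runner,multi-gpu-runner --token <GITHUB_TOKEN>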
| 31
|
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float, ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
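    # Worked example (added for illustration): with n = 25 and p = 100, the intrinsic
    # concentration follows from the mass-action law as sqrt(n * p) = 50.
    print(carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0))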
| 31
| 1
|
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """simple docstring"""
    result = namedtuple('''result''', '''name value''')
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
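    # Worked example (added for illustration): 5 W drawn at 2 A implies P / I = 2.5 V.
    print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)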
| 5
|
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'decord')
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)
    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)
    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith('http://') or video.startswith('https://'):
            video = BytesIO(requests.get(video).content)
        videoreader = VideoReader(video)
        videoreader.seek(0)
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)
        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 28
| 0
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs)
| 370
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
            '''sample_max_value''': 1.0,
            '''algorithm_type''': '''dpmsolver++''',
            '''solver_type''': '''midpoint''',
            '''lambda_min_clipped''': -float('''inf'''),
            '''variance_type''': None,
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('''num_inference_steps''', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
pass
def lowerCamelCase ( self : Tuple , _snake_case : Optional[Any]=0 , **_snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_snake_case)
scheduler.set_timesteps(_snake_case)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case)
UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case)
# copy over dummy past residuals
new_scheduler.set_timesteps(_snake_case)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
UpperCAmelCase_ = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowerCamelCase ( self : Dict , _snake_case : int=None , **_snake_case : Optional[Any]):
"""simple docstring"""
if scheduler is None:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_snake_case)
UpperCAmelCase_ = scheduler_class(**_snake_case)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_snake_case)
UpperCAmelCase_ = scheduler_class(**_snake_case)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
return sample
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = 50
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case)
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:]):
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_5_7_4) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config gives identical results
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 7
| 0
|
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
        >>> from diffusers.utils import load_image
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> prompt = "A red cartoon frog, 4k"
        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)

        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/frog.png"
        ... )

        >>> image = pipe(
        ...     image=init_image,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ...     strength=0.2,
        ... ).images

        >>> image[0].save("red_frog.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
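# Quick arithmetic check (illustrative only): with the default scale_factor=8, a
# requested height of 768 gives 768 // 64 = 12 -> 12 * 8 = 96 latent rows, while 770
# is rounded up to 13 * 8 = 104 so the decoded image always covers the request.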
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
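# Sanity check (illustrative): prepare_image maps 8-bit RGB values into [-1, 1];
# a pure-white pixel (255) becomes 255 / 127.5 - 1 == 1.0 and pure black becomes -1.0,
# with the output shaped (1, 3, h, w) for the movq encoder.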
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Pipeline for image-to-image generation using Kandinsky 2.2."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
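    # Worked example (illustrative): with num_inference_steps=100 and strength=0.2,
    # init_timestep = 20 and t_start = 80, so only the last 20 scheduler timesteps are
    # returned and the loop refines the input rather than fully regenerating it.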
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
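    # Note on the img2img mechanics above: instead of starting from pure noise, the
    # movq-encoded image is pushed forward to `timestep` with scheduler.add_noise, so
    # denoising only runs over the final `strength` fraction of the schedule.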
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 121
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record a reference to key x, evicting the least recently used key if full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print all keys in the store, most recently used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
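# Trace of the demo above (capacity 4): after A, 2, 3 the deque is [3, 2, 'A'];
# re-referring 'A' moves it to the front, then adding 4 and 5 evicts the least
# recently used key, 2, leaving [5, 4, 'A', 3].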
| 121
| 1
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value and reshape
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
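# Worked example (illustrative): the original key "blocks.0.att.time_mix_k" is rewritten
# step by step by the rules above into "rwkv.blocks.0.attention.time_mix_key".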
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
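# Hypothetical invocation (the repo and checkpoint names below are illustrative only,
# not verified identifiers):
#   python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M.pth --output_dir ./rwkv-169m --size 169M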
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 14
|
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
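# Example: radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) returns
# [2, 24, 45, 66, 75, 90, 170, 802]; each pass buckets the values by one decimal
# digit (ones, tens, hundreds), and the passes are stable with respect to each other.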
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14
| 1
|
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q, S, T=1):
        return self.softmax(T * self.cos(q, S))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
| 47
|
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = """\
@inproceedings{kakwani2020indicnlpsuite,
    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
    year={2020},
    booktitle={Findings of EMNLP},
}
"""

_DESCRIPTION = """\
    IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
    variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""

_KWARGS_DESCRIPTION = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
    predictions: list of predictions to score (as int64),
        except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
    references: list of ground truth labels corresponding to the predictions (as int64),
        except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "precision": Precision@10
Examples:

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli')  # 'wnli' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'precision@10': 1.0}

"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
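# How precision@10 is computed above (illustrative): row i of `sim` holds the cosine
# distances from English sentence i to every candidate sentence; retrieval counts as
# a hit when the true index i appears among that row's 10 smallest distances.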
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
'references': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' if self.config_name != 'cvit-mkb-clsr' else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(_a , _a )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
| 47
| 1
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def a__ ( A__, A__, A__, A__, A__=True, A__="pt" ):
SCREAMING_SNAKE_CASE_ : Dict = {'add_prefix_space': True} if isinstance(A__, A__ ) and not line.startswith(' ' ) else {}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = padding_side
return tokenizer(
[line], max_length=A__, padding='max_length' if pad_to_max_length else None, truncation=A__, return_tensors=A__, add_special_tokens=A__, **A__, )
def a__ ( A__, A__, A__=None, ):
SCREAMING_SNAKE_CASE_ : List[str] = input_ids.ne(A__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
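# Worked example (illustrative): f1_score("new york city", "york city") finds two
# shared tokens, so precision = 2/3, recall = 2/2, and F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.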
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 162
|
def count_inversions_bf(arr):
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
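# Example: count_inversions_bf([3, 1, 2]) == 2, from the out-of-order pairs
# (3, 1) and (3, 2); the brute force is O(n^2), the recursive version below O(n log n).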
def count_inversions_recursive(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
| 162
| 1
|
from __future__ import annotations
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """Returns True if n is prime, for 2 <= n <= 1_000_000."""
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    """Returns True if n contains an even digit."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result
def solution() -> int:
    return len(find_circular_primes())
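# Example: 197 is a circular prime because 197, 971 and 719 are all prime;
# there are 55 such primes below one million (Project Euler problem 35).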
if __name__ == "__main__":
print(F'''{len(find_circular_primes()) = }''')
| 147
|
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
| 147
| 1
|
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
# time series specific configuration
snake_case_ = prediction_length
snake_case_ = context_length if context_length is not None else prediction_length
snake_case_ = distribution_output
snake_case_ = loss
snake_case_ = input_size
snake_case_ = num_time_features
snake_case_ = lags_sequence
snake_case_ = scaling
snake_case_ = num_dynamic_real_features
snake_case_ = num_static_real_features
snake_case_ = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(snake_case ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
snake_case_ = cardinality
else:
snake_case_ = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(snake_case ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
snake_case_ = embedding_dimension
else:
snake_case_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case_ = num_parallel_samples
# Transformer architecture configuration
snake_case_ = input_size * len(self.lags_sequence ) + self._number_of_features
snake_case_ = d_model
snake_case_ = encoder_attention_heads
snake_case_ = decoder_attention_heads
snake_case_ = encoder_ffn_dim
snake_case_ = decoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = decoder_layers
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = use_cache
# Autoformer
snake_case_ = label_length
snake_case_ = moving_average
snake_case_ = autocorrelation_factor
super().__init__(is_encoder_decoder=snake_case , **snake_case )
@property
def a ( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 354
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 200
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
_UpperCamelCase = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def lowerCAmelCase__( lowercase : Optional[int] , lowercase : List[str]=None , lowercase : Any=None , lowercase : Union[str, Any]=None ) -> List[str]:
__snake_case : Dict = True
while ask_again:
__snake_case : Tuple = input(lowercase )
try:
if default is not None and len(lowercase ) == 0:
return default
return convert_value(lowercase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowercase )
def lowerCAmelCase__( lowercase : Optional[int] , lowercase : Tuple=[] , lowercase : Tuple=None , lowercase : Optional[Any]=0 ) -> Any:
__snake_case : Dict = BulletMenu(lowercase , lowercase )
__snake_case : Tuple = menu.run(default_choice=lowercase )
return convert_value(lowercase ) if convert_value is not None else result
def lowerCAmelCase__( lowercase : Tuple ) -> Union[str, Any]:
__snake_case : Optional[Any] = int(lowercase )
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def lowerCAmelCase__( lowercase : List[Any] ) -> Tuple:
__snake_case : str = int(lowercase )
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def lowerCAmelCase__( lowercase : List[str] ) -> Tuple:
__snake_case : Dict = int(lowercase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def lowerCAmelCase__( lowercase : Optional[int] ) -> Any:
__snake_case : List[str] = int(lowercase )
return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def lowerCAmelCase__( lowercase : Optional[int] ) -> Optional[int]:
__snake_case : List[Any] = int(lowercase )
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def lowerCAmelCase__( lowercase : Optional[Any] ) -> str:
return {"yes": True, "no": False}[value.lower()]
class _lowerCamelCase ( argparse.RawDescriptionHelpFormatter ):
"""simple docstring"""
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[int] = super()._format_usage(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__snake_case : Any = usage.replace("<command> [<args>] " , "" )
return usage
| 326
|
from __future__ import annotations
def lowerCAmelCase__( lowercase : str , lowercase : list[str] | None = None ) -> list[list[str]]:
__snake_case : List[str] = word_bank or []
# create a table
__snake_case : int = len(lowercase ) + 1
__snake_case : list[list[list[str]]] = []
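    # table[i] holds every combination of words from the bank that spells target[:i]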
for _ in range(lowercase ):
table.append([] )
# seed value
__snake_case : Optional[int] = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(lowercase ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowercase )] == word:
__snake_case : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
table[i + len(lowercase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowercase )]:
combination.reverse()
return table[len(lowercase )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 326
| 1
|
import datasets
A_ : List[Any] = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
A_ : List[str] = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
A_ : Any = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def UpperCamelCase (lowercase_: Optional[Any] , lowercase_: List[Any] ) -> Dict:
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a (datasets.Metric ):
'''simple docstring'''
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def __A ( self , A__ , A__ ):
return {"accuracy": simple_accuracy(A__ , A__ )}
| 141
|
import argparse
from collections import defaultdict
def UpperCamelCase (lowercase_: List[str] , lowercase_: Optional[int] , lowercase_: Optional[Any] , lowercase_: Union[str, Any] , lowercase_: Any ) -> int:
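    # rewrite `file` in place: inside the given class and test function, find the
    # done_test[_id]-th statement matching the start of `correct_line` and replace
    # it while preserving the original indentation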
A__ : Optional[Any] = f"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(lowercase_ , """r""" ) as f:
A__ : Union[str, Any] = f.readlines()
A__ : str = f"""class {class_name}("""
A__ : Optional[Any] = f"""{4 * ' '}def {test_name}("""
A__ : Union[str, Any] = f"""{8 * ' '}{correct_line.split()[0]}"""
A__ : Optional[int] = f"""{16 * ' '}{correct_line.split()[0]}"""
A__ : int = False
A__ : str = False
A__ : Tuple = False
A__ : Optional[int] = False
A__ : Optional[Any] = 0
A__ : Dict = 0
A__ : List[str] = []
for line in lines:
if line.startswith(lowercase_ ):
A__ : Dict = True
elif in_class and line.startswith(lowercase_ ):
A__ : Optional[Any] = True
elif in_class and in_func and (line.startswith(lowercase_ ) or line.startswith(lowercase_ )):
A__ : Tuple = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
A__ : Any = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
A__ : Dict = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"""{spaces * ' '}{correct_line}""" )
A__ : List[str] = False
else:
new_lines.append(lowercase_ )
with open(lowercase_ , """w""" ) as f:
for line in new_lines:
f.write(lowercase_ )
def UpperCamelCase (lowercase_: List[str] , lowercase_: Optional[Any]=None ) -> Any:
if fail is not None:
with open(lowercase_ , """r""" ) as f:
A__ : Dict = {l.strip() for l in f.readlines()}
else:
A__ : List[str] = None
with open(lowercase_ , """r""" ) as f:
A__ : int = f.readlines()
A__ : Union[str, Any] = defaultdict(lowercase_ )
for line in correct_lines:
        A__ , A__ , A__ , A__ = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if __name__ == "__main__":
A_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
A_ : Optional[Any] = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 141
| 1
|
import math
import os
import sys
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
A : str = ''''''
try:
with open(__a , """rb""" ) as binary_file:
A : Optional[int] = binary_file.read()
for dat in data:
A : int = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None:
"""simple docstring"""
lexicon.pop(__a )
A : Optional[Any] = last_match_id
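    # when the number of codes reaches a power of two, the code width grows by one
    # bit, so every existing entry is padded with a leading "0"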
if math.loga(__a ).is_integer():
for curr_key in lexicon:
A : Dict = '''0''' + lexicon[curr_key]
A : str = bin(__a )[2:]
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
A : Optional[int] = {'''0''': '''0''', '''1''': '''1'''}
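    # the lexicon maps bit patterns read from the stream back to the ids they
    # encode; it starts with the two one-bit codes and grows during decoding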
    A , A = '''''', ''''''
A : Tuple = len(__a )
for i in range(len(__a ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
A : int = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__a , __a , __a , __a )
index += 1
A : int = ''''''
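    # after the main pass, pad any leftover bits with zeros until they match a
    # known code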
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
A : Union[str, Any] = lexicon[curr_string]
result += last_match_id
return result
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> str:
"""simple docstring"""
A : Tuple = os.path.getsize(__a )
A : List[str] = bin(__a )[2:]
A : List[Any] = len(__a )
return "0" * (length_length - 1) + file_length_binary + compressed
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> None:
"""simple docstring"""
A : List[str] = 8
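    # pack the bit string into 8-bit chunks; the final chunk is terminated with a
    # single "1" followed by "0" padding so the decoder can locate the true end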
try:
with open(__a , """wb""" ) as opened_file:
A : int = [
to_write[i : i + byte_length]
for i in range(0 , len(__a ) , __a )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__a , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> None:
"""simple docstring"""
A : Any = read_file_binary(__a )
A : List[str] = compress_data(__a )
A : Dict = add_file_length(__a , __a )
write_file_binary(__a , __a )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 116
|
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@parameterized.expand([(None,), ('''foo.json''',)])
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ , config_name=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , lowercase_)
self.assertEqual(loaded_config.temperature , 0.7)
self.assertEqual(loaded_config.length_penalty , 1.0)
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]])
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50)
self.assertEqual(loaded_config.max_length , 20)
self.assertEqual(loaded_config.max_time , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoConfig.from_pretrained('''gpt2''')
SCREAMING_SNAKE_CASE_ : int = GenerationConfig.from_model_config(lowercase_)
SCREAMING_SNAKE_CASE_ : int = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(lowercase_ , lowercase_)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id)
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = GenerationConfig()
SCREAMING_SNAKE_CASE_ : Any = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
SCREAMING_SNAKE_CASE_ : str = copy.deepcopy(lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = generation_config.update(**lowercase_)
# update_kwargs was not modified (no side effects)
self.assertEqual(lowercase_ , lowercase_)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(lowercase_ , {'''foo''': '''bar'''})
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = GenerationConfig()
SCREAMING_SNAKE_CASE_ : List[str] = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''') as tmp_dir:
generation_config.save_pretrained(lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = GenerationConfig.from_pretrained(lowercase_)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''')
SCREAMING_SNAKE_CASE_ : Optional[Any] = GenerationConfig.from_model_config(lowercase_)
assert not hasattr(lowercase_ , '''foo''') # no new kwargs should be initialized if from config
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0)
self.assertEqual(default_config.do_sample , lowercase_)
self.assertEqual(default_config.num_beams , 1)
SCREAMING_SNAKE_CASE_ : Dict = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7)
self.assertEqual(config.do_sample , lowercase_)
self.assertEqual(config.num_beams , 1)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0)
self.assertEqual(loaded_config.temperature , 1.0)
self.assertEqual(loaded_config.do_sample , lowercase_)
self.assertEqual(loaded_config.num_beams , 1) # default value
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = TOKEN
HfFolder.save_token(lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str]):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''')
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token)
SCREAMING_SNAKE_CASE_ : int = GenerationConfig.from_pretrained(F'{USER}/test-generation-config')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id='''test-generation-config''' , push_to_hub=lowercase_ , use_auth_token=self._token)
SCREAMING_SNAKE_CASE_ : Optional[int] = GenerationConfig.from_pretrained(F'{USER}/test-generation-config')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token)
SCREAMING_SNAKE_CASE_ : Any = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=lowercase_ , use_auth_token=self._token)
SCREAMING_SNAKE_CASE_ : Any = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
| 91
| 0
|
'''simple docstring'''
def __magic_name__ ( A = 1_0 , A = 1_0_0_0 , A = True ) -> int:
assert (
isinstance(A , A )
and isinstance(A , A )
and isinstance(A , A )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
        raise ValueError('Invalid value for min_val or max_val (min_val must be < max_val)' )
return min_val if option else max_val
def __magic_name__ ( A , A ) -> int:
return int((number_a + number_a) / 2 )
def __magic_name__ ( A , A , A ) -> None:
assert (
isinstance(A , A ) and isinstance(A , A ) and isinstance(A , A )
    ), 'argument values must be of type "int"'
if lower > higher:
        raise ValueError('argument values for lower and higher must satisfy (lower < higher)' )
if not lower < to_guess < higher:
raise ValueError(
            'guess value must be within the range of the lower and higher values' )
def answer(A ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('started...' )
snake_case = lower
snake_case = higher
snake_case = []
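    # classic bisection: probe the midpoint and shrink [lower, higher] using the
    # high/low feedback until the midpoint equals the hidden number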
while True:
snake_case = get_avg(A , A )
last_numbers.append(A )
if answer(A ) == "low":
snake_case = number
elif answer(A ) == "high":
snake_case = number
else:
break
print(F'''guess the number : {last_numbers[-1]}''' )
print(F'''details : {last_numbers!s}''' )
def __magic_name__ ( ) -> None:
snake_case = int(input('Enter lower value : ' ).strip() )
snake_case = int(input('Enter high value : ' ).strip() )
snake_case = int(input('Enter value to guess : ' ).strip() )
guess_the_number(A , A , A )
if __name__ == "__main__":
main()
| 332
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase ( __lowerCAmelCase ):
def __init__( self, *lowercase_, **lowercase_ ) -> None:
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.', lowercase_, )
super().__init__(*lowercase_, **lowercase_ )
| 332
| 1
|
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCamelCase_ ( _UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(_UpperCAmelCase ):
return ext
raise Exception(
F"""Unable to determine file format from file extension {path}. """
F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def UpperCamelCase_ ( _UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
_UpperCAmelCase : Optional[Any] = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
_UpperCAmelCase : List[Any] = PipelineDataFormat.from_str(
format=_UpperCAmelCase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(_UpperCAmelCase , _UpperCAmelCase )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def __init__( self : str , A : Pipeline , A : PipelineDataFormat ):
_UpperCAmelCase : Any = nlp
_UpperCAmelCase : List[Any] = reader
@staticmethod
def _A ( A : ArgumentParser ):
_UpperCAmelCase : List[str] = parser.add_parser("run" , help="Run a pipeline through the CLI" )
run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
run_parser.add_argument("--input" , type=A , help="Path to the file to use for inference" )
run_parser.add_argument("--output" , type=A , help="Path to the file that will be used post to write results." )
run_parser.add_argument("--model" , type=A , help="Name or path to the model to instantiate." )
run_parser.add_argument("--config" , type=A , help="Name or path to the model's config to instantiate." )
run_parser.add_argument(
"--tokenizer" , type=A , help="Name of the tokenizer to use. (default: same as the model name)" )
run_parser.add_argument(
"--column" , type=A , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
run_parser.add_argument(
"--format" , type=A , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
run_parser.add_argument(
"--device" , type=A , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
run_parser.set_defaults(func=A )
def _A ( self : int ):
        _UpperCAmelCase , _UpperCAmelCase = self._nlp, []
for entry in self._reader:
_UpperCAmelCase : Optional[int] = nlp(**A ) if self._reader.is_multi_columns else nlp(A )
if isinstance(A , A ):
outputs.append(A )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
_UpperCAmelCase : Dict = self._reader.save_binary(A )
logger.warning(F"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(A )
| 31
|
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def __init__( self : Any , A : Optional[int]=None , A : Tuple=None , *A : Tuple , **A : List[str] ):
super().__init__(*A , **A )
if config is None:
assert isinstance(self.model , A ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
_UpperCAmelCase : str = self.model.config
else:
_UpperCAmelCase : List[str] = config
_UpperCAmelCase : List[Any] = data_args
_UpperCAmelCase : str = self.config.tgt_vocab_size if isinstance(self.config , A ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
" padding.." )
if self.args.label_smoothing == 0:
_UpperCAmelCase : Optional[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_UpperCAmelCase : Dict = label_smoothed_nll_loss
def _A ( self : Tuple , A : int ):
if self.optimizer is None:
_UpperCAmelCase : Tuple = ["bias", "LayerNorm.weight"]
_UpperCAmelCase : str = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
_UpperCAmelCase : int = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_UpperCAmelCase : List[str] = Adafactor
_UpperCAmelCase : List[Any] = {"scale_parameter": False, "relative_step": False}
else:
_UpperCAmelCase : List[str] = AdamW
_UpperCAmelCase : List[str] = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
_UpperCAmelCase : List[Any] = self.args.learning_rate
if self.sharded_ddp:
_UpperCAmelCase : List[Any] = OSS(
params=A , optim=A , **A , )
else:
_UpperCAmelCase : Union[str, Any] = optimizer_cls(A , **A )
if self.lr_scheduler is None:
_UpperCAmelCase : List[str] = self._get_lr_scheduler(A )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def _A ( self : List[str] , A : Optional[int] ):
_UpperCAmelCase : List[str] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_UpperCAmelCase : Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_UpperCAmelCase : str = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_UpperCAmelCase : str = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A )
return scheduler
def _A ( self : Tuple ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _A ( self : Any , A : Union[str, Any] , A : Union[str, Any] , A : List[Any] ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_UpperCAmelCase : List[str] = model(**A , use_cache=A )[0]
_UpperCAmelCase : int = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
                _UpperCAmelCase , _UpperCAmelCase = model(**A , labels=A , use_cache=A )[:2]
else:
# compute label smoothed loss
_UpperCAmelCase : Optional[int] = model(**A , use_cache=A )[0]
_UpperCAmelCase : List[str] = torch.nn.functional.log_softmax(A , dim=-1 )
            _UpperCAmelCase , _UpperCAmelCase = self.loss_fn(A , A , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def _A ( self : List[str] , A : Optional[int] , A : Optional[int] ):
_UpperCAmelCase : Union[str, Any] = inputs.pop("labels" )
        _UpperCAmelCase , _UpperCAmelCase = self._compute_loss(A , A , A )
return loss
def _A ( self : List[str] , A : nn.Module , A : Dict[str, Union[torch.Tensor, Any]] , A : bool , A : Optional[List[str]] = None , ):
_UpperCAmelCase : List[str] = self._prepare_inputs(A )
_UpperCAmelCase : Dict = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_UpperCAmelCase : Dict = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **A , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase : int = self._pad_tensors_to_max_len(A , gen_kwargs["max_length"] )
_UpperCAmelCase : Any = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
_UpperCAmelCase , _UpperCAmelCase : str = self._compute_loss(A , A , A )
_UpperCAmelCase : List[str] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_UpperCAmelCase : str = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase : Optional[Any] = self._pad_tensors_to_max_len(A , gen_kwargs["max_length"] )
return (loss, logits, labels)
def _A ( self : Dict , A : int , A : List[str] ):
# If PAD token is not defined at least EOS token has to be defined
_UpperCAmelCase : Union[str, Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
F""" padded to `max_length`={max_length}""" )
_UpperCAmelCase : Tuple = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_UpperCAmelCase : Tuple = tensor
return padded_tensor
| 31
| 1
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__snake_case :Union[str, Any] = logging.getLogger(__name__)
@dataclass(frozen=__UpperCAmelCase )
class _A :
UpperCamelCase__ : str
UpperCamelCase__ : str
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : Optional[str] = None
@dataclass(frozen=__UpperCAmelCase )
class _A :
UpperCamelCase__ : List[int]
UpperCamelCase__ : Optional[List[int]] = None
UpperCamelCase__ : Optional[List[int]] = None
UpperCamelCase__ : Optional[Union[int, float]] = None
UpperCamelCase__ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : List[InputFeatures]
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : bool = False , ):
'''simple docstring'''
__a = hans_processors[task]()
__a = os.path.join(
__SCREAMING_SNAKE_CASE , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE , ) , )
__a = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__a , __a = label_list[2], label_list[1]
__a = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__a = cached_features_file + '''.lock'''
with FileLock(__SCREAMING_SNAKE_CASE):
if os.path.exists(__SCREAMING_SNAKE_CASE) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}')
__a = torch.load(__SCREAMING_SNAKE_CASE)
else:
logger.info(F'Creating features from dataset file at {data_dir}')
__a = (
processor.get_dev_examples(__SCREAMING_SNAKE_CASE) if evaluate else processor.get_train_examples(__SCREAMING_SNAKE_CASE)
)
logger.info('''Training examples: %s''' , len(__SCREAMING_SNAKE_CASE))
__a = hans_convert_examples_to_features(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
logger.info('''Saving features into cached file %s''' , __SCREAMING_SNAKE_CASE)
torch.save(self.features , __SCREAMING_SNAKE_CASE)
def __len__( self : Dict):
'''simple docstring'''
return len(self.features)
def __getitem__( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
return self.features[i]
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _A :
UpperCamelCase__ : List[InputFeatures]
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] = 128 , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : bool = False , ):
'''simple docstring'''
__a = hans_processors[task]()
__a = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__a , __a = label_list[2], label_list[1]
__a = label_list
__a = processor.get_dev_examples(__SCREAMING_SNAKE_CASE) if evaluate else processor.get_train_examples(__SCREAMING_SNAKE_CASE)
__a = hans_convert_examples_to_features(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features) , desc='''convert examples to features'''):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(__SCREAMING_SNAKE_CASE)))
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__a = tf.data.Dataset.from_generator(
__SCREAMING_SNAKE_CASE , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([]),
'''input_ids''': tf.TensorShape([None, None]),
'''attention_mask''': tf.TensorShape([None, None]),
'''token_type_ids''': tf.TensorShape([None, None]),
},
tf.TensorShape([]),
) , )
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return self.dataset
def __len__( self : Any):
'''simple docstring'''
return len(self.features)
def __getitem__( self : Any , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
return self.features[i]
def _lowerCamelCase ( self : str):
'''simple docstring'''
return self.label_list
class _A ( __UpperCAmelCase ):
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(__SCREAMING_SNAKE_CASE , '''heuristics_train_set.txt''')) , '''train''')
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(__SCREAMING_SNAKE_CASE , '''heuristics_evaluation_set.txt''')) , '''dev''')
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = []
for i, line in enumerate(__SCREAMING_SNAKE_CASE):
if i == 0:
continue
__a = '''%s-%s''' % (set_type, line[0])
__a = line[5]
__a = line[6]
__a = line[7][2:] if line[7].startswith('''ex''') else line[7]
__a = line[0]
examples.append(InputExample(guid=__SCREAMING_SNAKE_CASE , text_a=__SCREAMING_SNAKE_CASE , text_b=__SCREAMING_SNAKE_CASE , label=__SCREAMING_SNAKE_CASE , pairID=__SCREAMING_SNAKE_CASE))
return examples
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__a = {label: i for i, label in enumerate(_UpperCAmelCase )}
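    # map each label string to an integer id; labels missing from the map fall back to 0 below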
__a = []
for ex_index, example in tqdm.tqdm(enumerate(_UpperCAmelCase ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
__a = tokenizer(
example.text_a , example.text_b , add_special_tokens=_UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' , truncation=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , )
__a = label_map[example.label] if example.label in label_map else 0
__a = int(example.pairID )
features.append(InputFeatures(**_UpperCAmelCase , label=_UpperCAmelCase , pairID=_UpperCAmelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
        logger.info(f'guid: {example.guid}' )
logger.info(f'features: {features[i]}' )
return features
__snake_case :Union[str, Any] = {
'''hans''': 3,
}
__snake_case :Tuple = {
'''hans''': HansProcessor,
}
| 131
|
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
def get_matched_characters(_UpperCAmelCase , _UpperCAmelCase ) -> str:
__a = []
__a = min(len(_stra ) , len(_stra ) ) // 2
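        # a character counts as matched if the other string contains it within
        # `limit` positions of the same index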
for i, l in enumerate(_stra ):
__a = int(max(0 , i - limit ) )
__a = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(_UpperCAmelCase )
__a = f'{_stra[0:_stra.index(_UpperCAmelCase )]} {_stra[_stra.index(_UpperCAmelCase ) + 1:]}'
return "".join(_UpperCAmelCase )
# matching characters
__a = get_matched_characters(_UpperCAmelCase , _UpperCAmelCase )
__a = get_matched_characters(_UpperCAmelCase , _UpperCAmelCase )
__a = len(_UpperCAmelCase )
# transposition
__a = (
len([(ca, ca) for ca, ca in zip(_UpperCAmelCase , _UpperCAmelCase ) if ca != ca] ) // 2
)
if not match_count:
__a = 0.0
else:
__a = (
1
/ 3
* (
match_count / len(_UpperCAmelCase )
+ match_count / len(_UpperCAmelCase )
+ (match_count - transpositions) / match_count
)
)
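    # i.e. jaro = (m/|s1| + m/|s2| + (m - t)/m) / 3, with m matches and t transpositions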
# common prefix up to 4 characters
__a = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
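    # Winkler adjustment: boost the score for up to four characters of shared prefix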
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
| 131
| 1
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowercase ( unittest.TestCase ):
def a ( self ):
snake_case_ = 10
def a ( self ):
snake_case_ = [1, 2, 3, 4]
snake_case_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowercase_ , self.block_size , 0 ) , lowercase_ )
def a ( self ):
snake_case_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
snake_case_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase_ , self.block_size , 0 ) , lowercase_ )
def a ( self ):
snake_case_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
snake_case_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase_ , self.block_size , 0 ) , lowercase_ )
def a ( self ):
snake_case_ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
snake_case_ , snake_case_ = process_story(lowercase_ )
self.assertEqual(lowercase_ , [] )
def a ( self ):
snake_case_ = ''
snake_case_ , snake_case_ = process_story(lowercase_ )
self.assertEqual(lowercase_ , [] )
self.assertEqual(lowercase_ , [] )
def a ( self ):
snake_case_ = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
snake_case_ , snake_case_ = process_story(lowercase_ )
snake_case_ = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(lowercase_ , lowercase_ )
snake_case_ = ['It was the best of times.']
self.assertEqual(lowercase_ , lowercase_ )
def a ( self ):
snake_case_ = torch.tensor([1, 2, 3, 4] )
snake_case_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowercase_ , 0 ).numpy() , expected.numpy() )
def a ( self ):
snake_case_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
snake_case_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase_ , 23 ).numpy() , expected.numpy() )
def a ( self ):
snake_case_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
snake_case_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase_ , 1 ).numpy() , expected.numpy() )
def a ( self ):
snake_case_ = 101
snake_case_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
snake_case_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
snake_case_ = compute_token_type_ids(lowercase_ , lowercase_ )
np.testing.assert_array_equal(lowercase_ , lowercase_ )
| 285
|
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class A :
"""simple docstring"""
def __init__( self : str,lowercase_ : Any,lowercase_ : Tuple=1_3,lowercase_ : str=7,lowercase_ : Tuple=True,lowercase_ : int=True,lowercase_ : List[Any]=True,lowercase_ : List[str]=True,lowercase_ : List[str]=9_9,lowercase_ : List[Any]=6_4,lowercase_ : List[str]=5,lowercase_ : Optional[Any]=4,lowercase_ : Optional[Any]=3_7,lowercase_ : Optional[Any]="gelu",lowercase_ : int=0.1,lowercase_ : str=0.1,lowercase_ : Optional[Any]=5_1_2,lowercase_ : int=1_6,lowercase_ : List[Any]=2,lowercase_ : Union[str, Any]=0.02,lowercase_ : Tuple=3,lowercase_ : List[Any]=4,lowercase_ : str=None,)-> Union[str, Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
A__ = vocab_size - 1
def snake_case__ ( self : str )-> Optional[Any]:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
A__ = self.get_config()
return config, input_ids, input_mask, token_labels
def snake_case__ ( self : List[Any] )-> Tuple:
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=lowercase_,initializer_range=self.initializer_range,pad_token_id=self.pad_token_id,)
def snake_case__ ( self : Optional[int] )-> Union[str, Any]:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.prepare_config_and_inputs()
A__ = True
return config, input_ids, input_mask, token_labels
def snake_case__ ( self : Any,lowercase_ : List[Any],lowercase_ : List[Any],lowercase_ : str )-> Any:
'''simple docstring'''
A__ = GPTNeoXModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_ )
A__ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Union[str, Any],lowercase_ : List[str],lowercase_ : Dict,lowercase_ : Optional[Any] )-> Tuple:
'''simple docstring'''
A__ = True
A__ = GPTNeoXModel(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Union[str, Any],lowercase_ : str,lowercase_ : Union[str, Any],lowercase_ : Union[str, Any],lowercase_ : List[str] )-> List[str]:
'''simple docstring'''
A__ = GPTNeoXForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[int],lowercase_ : Optional[int],lowercase_ : Optional[int],lowercase_ : Dict,lowercase_ : Any )-> int:
'''simple docstring'''
A__ = self.num_labels
A__ = GPTNeoXForQuestionAnswering(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_ )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def snake_case__ ( self : List[str],lowercase_ : List[str],lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Optional[int] )-> str:
'''simple docstring'''
A__ = self.num_labels
A__ = GPTNeoXForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def snake_case__ ( self : Any,lowercase_ : Union[str, Any],lowercase_ : List[Any],lowercase_ : Optional[Any],lowercase_ : int )-> Union[str, Any]:
'''simple docstring'''
A__ = self.num_labels
A__ = GPTNeoXForTokenClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : int,lowercase_ : str,lowercase_ : int,lowercase_ : Union[str, Any] )-> List[Any]:
'''simple docstring'''
A__ = True
A__ = GPTNeoXForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# first forward pass
A__ = model(lowercase_,attention_mask=lowercase_,use_cache=lowercase_ )
A__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3),config.vocab_size )
A__ = ids_tensor((self.batch_size, 3),vocab_size=2 )
# append to next input_ids and
A__ = torch.cat([input_ids, next_tokens],dim=-1 )
A__ = torch.cat([input_mask, next_mask],dim=-1 )
A__ = model(lowercase_,attention_mask=lowercase_,output_hidden_states=lowercase_ )
A__ = output_from_no_past['hidden_states'][0]
A__ = model(
lowercase_,attention_mask=lowercase_,past_key_values=lowercase_,output_hidden_states=lowercase_,)['hidden_states'][0]
# select random slice
A__ = ids_tensor((1,),output_from_past.shape[-1] ).item()
A__ = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-3 ) )
def snake_case__ ( self : str )-> Union[str, Any]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ = config_and_inputs
A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def snake_case__ ( self : str )-> Tuple:
'''simple docstring'''
A__ = GPTNeoXModelTester(self )
A__ = ConfigTester(self,config_class=lowercase_,hidden_size=6_4,num_attention_heads=8 )
def snake_case__ ( self : Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowercase_,lowercase_,lowercase_ )
def snake_case__ ( self : Dict )-> List[Any]:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ )
def snake_case__ ( self : List[str] )-> Any:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
A__ = None
self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ )
def snake_case__ ( self : Optional[Any] )-> str:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase_,lowercase_,lowercase_ )
def snake_case__ ( self : Dict )-> Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowercase_ )
def snake_case__ ( self : Tuple )-> List[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
def snake_case__ ( self : Any )-> List[str]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )
def snake_case__ ( self : str )-> Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def snake_case__ ( self : Union[str, Any] )-> Optional[Any]:
'''simple docstring'''
pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class A ( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_pythia_410m_sample(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo was updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
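
# For quick orientation outside the test harness, a minimal standalone sketch of
# the RoPE-scaling knob exercised by the parameterized test above. The tiny sizes
# below are illustrative assumptions, not values taken from the test.
from transformers import GPTNeoXConfig, GPTNeoXModel

config = GPTNeoXConfig(
    vocab_size=512, hidden_size=64, num_hidden_layers=2, num_attention_heads=8,
    intermediate_size=128, max_position_embeddings=128,
    rope_scaling={"type": "dynamic", "factor": 10.0},  # same {"type", "factor"} dict the test builds
)
model = GPTNeoXModel(config).eval()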
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    r"""
    Wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor.
    """

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call)."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
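
# A minimal usage sketch of the processor above. The checkpoint id is a real public
# one; the silent audio array is an illustrative stand-in for recorded speech.
import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
batch = processor(audio=audio, sampling_rate=16_000, text="hello world", return_tensors="pt")
# `batch` carries the feature-extractor output plus the tokenized `labels` merged in by __call__.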
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
                 hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
                 initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16,
                 num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
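
# A quick instantiation sketch for the config above; the printed values are the
# ViT-B/16-style defaults set in __init__.
from transformers import ViTMSNConfig

config = ViTMSNConfig()
print(config.hidden_size, config.patch_size, config.num_hidden_layers)  # 768 16 12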
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
HIDDEN_SIZE_MAPPING = {
"""169M""": 768,
"""430M""": 1024,
"""1B5""": 2048,
"""3B""": 2560,
"""7B""": 4096,
"""14B""": 5120,
}
def convert_state_dict(state_dict):
    """Rename the keys of an original RWKV state dict to the Transformers naming scheme."""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict).
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
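
# A sanity check of the key-renaming rules in `convert_state_dict`, shown as a
# commented sketch (dummy tensors; run in the same module as the script above):
#
#   demo = {"emb.weight": torch.zeros(2, 2), "blocks.0.att.time_mix_k": torch.zeros(2)}
#   sorted(convert_state_dict(demo))
#   # -> ['rwkv.blocks.0.attention.time_mix_key', 'rwkv.embeddings.weight']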
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
bert_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
blip_test_file = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple:
'''simple docstring'''
A__ = get_test_to_tester_mapping(UpperCAmelCase__)
A__ = get_test_to_tester_mapping(UpperCAmelCase__)
A__ = {'''BertModelTest''': '''BertModelTester'''}
A__ = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]:
'''simple docstring'''
A__ = get_model_to_test_mapping(UpperCAmelCase__)
A__ = get_model_to_test_mapping(UpperCAmelCase__)
A__ = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
A__ = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str:
'''simple docstring'''
A__ = get_model_to_tester_mapping(UpperCAmelCase__)
A__ = get_model_to_tester_mapping(UpperCAmelCase__)
A__ = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
A__ = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
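
# Outside unittest, the same utilities can be driven directly; a minimal sketch
# (assumes it runs from the repo root, like the tests above, and reuses bert_test_file):
test_to_tester = get_test_to_tester_mapping(bert_test_file)
print(get_test_info.to_json(test_to_tester))  # expected: {"BertModelTest": "BertModelTester"}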
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    Regex-style matching of `input_string` against `pattern`, where "." matches any
    single character and "*" matches zero or more of the preceding element.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length matches pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
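
# A few extra doctest-style checks of the matcher above; the expected values
# follow directly from the DP recurrence.
assert match_pattern("aab", "c*a*b") is True    # "c*" matches empty, "a*" matches "aa"
assert match_pattern("aaa", "a.a") is True      # "." matches any single character
assert match_pattern("aaa", "aa") is False      # pattern too short, no wildcard
assert match_pattern("dabc", "*abc") is False   # leading "*" has no preceding element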
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __lowerCamelCase ( __snake_case ):
lowerCamelCase_ : Tuple = 'levit'
def __init__( self , lowerCamelCase=224 , lowerCamelCase=3 , lowerCamelCase=3 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase=16 , lowerCamelCase=[128, 256, 384] , lowerCamelCase=[4, 8, 12] , lowerCamelCase=[4, 4, 4] , lowerCamelCase=[16, 16, 16] , lowerCamelCase=0 , lowerCamelCase=[2, 2, 2] , lowerCamelCase=[2, 2, 2] , lowerCamelCase=0.02 , **lowerCamelCase , ) -> Tuple:
super().__init__(**lowerCamelCase )
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = kernel_size
snake_case_ = stride
snake_case_ = padding
snake_case_ = hidden_sizes
snake_case_ = num_attention_heads
snake_case_ = depths
snake_case_ = key_dim
snake_case_ = drop_path_rate
snake_case_ = patch_size
snake_case_ = attention_ratio
snake_case_ = mlp_ratio
snake_case_ = initializer_range
snake_case_ = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __lowerCamelCase ( __snake_case ):
lowerCamelCase_ : Any = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self ) -> float:
return 1e-4
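
# A short sketch tying the pieces above together: the `down_ops` built in __init__
# depend only on `key_dim` and `hidden_sizes` (the values below are the defaults).
from transformers import LevitConfig

config = LevitConfig(hidden_sizes=[128, 256, 384], key_dim=[16, 16, 16])
print(config.down_ops[0])  # ['Subsample', 16, 8, 4, 2, 2], since 128 // 16 == 8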
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
_SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
if args.model_type == "roberta":
_SCREAMING_SNAKE_CASE : int = RobertaForMaskedLM.from_pretrained(args.model_name)
_SCREAMING_SNAKE_CASE : List[Any] = "roberta"
elif args.model_type == "gpt2":
_SCREAMING_SNAKE_CASE : Tuple = GPTaLMHeadModel.from_pretrained(args.model_name)
_SCREAMING_SNAKE_CASE : Any = "transformer"
_SCREAMING_SNAKE_CASE : Tuple = model.state_dict()
_SCREAMING_SNAKE_CASE : str = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
_SCREAMING_SNAKE_CASE : Dict = state_dict[F"{prefix}.{param_name}"]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
_SCREAMING_SNAKE_CASE : Optional[int] = F"{prefix}.embeddings.{w}.weight"
_SCREAMING_SNAKE_CASE : str = state_dict[param_name]
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE : Dict = F"{prefix}.embeddings.LayerNorm.{w}"
_SCREAMING_SNAKE_CASE : Dict = state_dict[param_name]
# Transformer Blocks #
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE : Any = state_dict[
F"{prefix}.h.{teacher_idx}.{layer}.{w}"
]
_SCREAMING_SNAKE_CASE : Dict = state_dict[F"{prefix}.h.{teacher_idx}.attn.bias"]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict[F"{layer}"]
if args.vocab_transform:
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE : List[str] = state_dict[F"lm_head.dense.{w}"]
_SCREAMING_SNAKE_CASE : Dict = state_dict[F"lm_head.layer_norm.{w}"]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE : List[Any] = state_dict[F"{prefix}.ln_f.{w}"]
_SCREAMING_SNAKE_CASE : Optional[Any] = state_dict["lm_head.weight"]
print(F"N layers selected for distillation: {std_idx}")
print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
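
# The layer-selection idea above in isolation: six alternating teacher blocks are
# copied into student blocks 0..5 (a sketch; the key prefixes are illustrative).
teacher_layers = [0, 2, 4, 7, 9, 11]
layer_map = {f"student.h.{s}": f"teacher.h.{t}" for s, t in enumerate(teacher_layers)}
print(layer_map["student.h.3"])  # teacher.h.7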
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort in place by repeatedly exchanging out-of-order pairs."""
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # flag that marks translated arrow-key codes

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
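
# A tiny interactive sketch of the two functions above (requires a real terminal;
# arrow keys come back with ARROW_KEY_FLAG folded into the code point).
if __name__ == "__main__":
    print("Press any key (arrows are remapped through ARROW_KEY_FLAG):")
    key = get_character()
    print(repr(key))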
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = 'philschmid/bart-large-cnn-samsum'
lowercase = (
'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
'and returns a summary of the text.'
)
lowercase = 'summarizer'
lowercase = AutoTokenizer
lowercase = AutoModelForSeqaSeqLM
lowercase = ['text']
lowercase = ['text']
def __lowercase ( self : Dict , lowerCamelCase : Dict ) -> Any:
return self.pre_processor(lowerCamelCase , return_tensors="""pt""" , truncation=lowerCamelCase )
def __lowercase ( self : Optional[Any] , lowerCamelCase : Tuple ) -> List[str]:
return self.model.generate(**lowerCamelCase )[0]
def __lowercase ( self : str , lowerCamelCase : Dict ) -> List[Any]:
return self.pre_processor.decode(lowerCamelCase , skip_special_tokens=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase )
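
# A hypothetical usage sketch: PipelineTool subclasses are callable, chaining the
# encode/forward/decode methods above. `TextSummarizationTool` is the upstream name
# this sketch assumes for the class defined above (its identifier is obfuscated here).
#
#   tool = TextSummarizationTool()
#   print(tool("A long English passage that should be compressed into a short summary ..."))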
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64,
                 intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000,
                 attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1,
                 max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5,
                 use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False,
                 use_parallel_residual=True, rope_scaling=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
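
# A quick demonstration of `_rope_scaling_validation` above (values are illustrative):
from transformers import GPTNeoXConfig

GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
try:
    GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})
except ValueError as err:
    print(err)  # `rope_scaling`'s factor field must be a float > 1, got 0.5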
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _A ( unittest.TestCase ):
def lowercase__ ( self : str , __magic_name__ : int , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = jnp.ones((batch_size, length) ) / length
return scores
def lowercase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = None
__snake_case : str = 20
__snake_case : List[str] = self._get_uniform_logits(batch_size=2 , length=__magic_name__ )
# tweak scores to not be uniform anymore
__snake_case : Union[str, Any] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__snake_case : Dict = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__snake_case : str = jax.nn.softmax(__magic_name__ , axis=-1 )
__snake_case : Dict = FlaxTemperatureLogitsWarper(temperature=0.5 )
__snake_case : Tuple = FlaxTemperatureLogitsWarper(temperature=1.3 )
__snake_case : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(__magic_name__ , scores.copy() , cur_len=__magic_name__ ) , axis=-1 )
__snake_case : str = jax.nn.softmax(temp_dist_warper_smoother(__magic_name__ , scores.copy() , cur_len=__magic_name__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def lowercase__ ( self : int ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = None
__snake_case : Optional[int] = 10
__snake_case : int = 2
# create ramp distribution
__snake_case : str = np.broadcast_to(np.arange(__magic_name__ )[None, :] , (batch_size, vocab_size) ).copy()
__snake_case : Tuple = ramp_logits[1:, : vocab_size // 2] + vocab_size
__snake_case : Union[str, Any] = FlaxTopKLogitsWarper(3 )
__snake_case : Optional[int] = top_k_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__snake_case : Dict = 5
__snake_case : List[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__snake_case : str = np.broadcast_to(np.arange(__magic_name__ )[None, :] , (batch_size, length) ).copy()
__snake_case : Tuple = top_k_warp_safety_check(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = None
__snake_case : Optional[int] = 10
__snake_case : List[str] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__snake_case : Optional[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__snake_case : Any = FlaxTopPLogitsWarper(0.8 )
__snake_case : List[str] = np.exp(top_p_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__snake_case : Any = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
__snake_case : Optional[int] = np.broadcast_to(np.arange(__magic_name__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__snake_case : Union[str, Any] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
__snake_case : Dict = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__snake_case : Dict = top_p_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = 20
__snake_case : Dict = 4
__snake_case : int = 0
__snake_case : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__magic_name__ )
# check that min length is applied at length 5
__snake_case : int = ids_tensor((batch_size, 20) , vocab_size=20 )
__snake_case : Optional[int] = 5
__snake_case : Any = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : Optional[Any] = min_dist_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
__snake_case : Optional[int] = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : Tuple = 15
__snake_case : Dict = min_dist_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertFalse(jnp.isinf(__magic_name__ ).any() )
def lowercase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Dict = 20
__snake_case : str = 4
__snake_case : List[str] = 0
__snake_case : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__magic_name__ )
# check that all scores are -inf except the bos_token_id score
__snake_case : Union[str, Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
__snake_case : Tuple = 1
__snake_case : Union[str, Any] = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : Tuple = logits_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
__snake_case : Optional[Any] = 3
__snake_case : Any = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : Optional[int] = logits_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertFalse(jnp.isinf(__magic_name__ ).any() )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Optional[int] = 20
__snake_case : Dict = 4
__snake_case : Optional[Any] = 0
__snake_case : Optional[Any] = 5
__snake_case : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=__magic_name__ , eos_token_id=__magic_name__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
__snake_case : Any = ids_tensor((batch_size, 4) , vocab_size=20 )
__snake_case : List[str] = 4
__snake_case : Tuple = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : Union[str, Any] = logits_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__snake_case : int = 3
__snake_case : Optional[Any] = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : Any = logits_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertFalse(jnp.isinf(__magic_name__ ).any() )
def lowercase__ ( self : List[str] ) -> str:
"""simple docstring"""
__snake_case : Dict = 4
__snake_case : Union[str, Any] = 10
__snake_case : List[Any] = 15
__snake_case : List[Any] = 2
__snake_case : Any = 1
__snake_case : Optional[Any] = 15
# dummy input_ids and scores
__snake_case : int = ids_tensor((batch_size, sequence_length) , __magic_name__ )
__snake_case : Tuple = input_ids.copy()
__snake_case : int = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : Optional[int] = scores.copy()
# instantiate all dist processors
__snake_case : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
__snake_case : List[Any] = FlaxTopKLogitsWarper(3 )
__snake_case : Optional[int] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__snake_case : str = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__magic_name__ )
__snake_case : List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__magic_name__ )
__snake_case : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__magic_name__ , eos_token_id=__magic_name__ )
__snake_case : Union[str, Any] = 10
# no processor list
__snake_case : int = temp_dist_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : Dict = top_k_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : List[Any] = top_p_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : int = min_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : Optional[Any] = bos_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : Any = eos_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
# with processor list
__snake_case : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__snake_case : Optional[int] = processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
# scores should be equal
self.assertTrue(jnp.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
__snake_case : Any = 4
__snake_case : Optional[int] = 10
__snake_case : Dict = 15
__snake_case : Tuple = 2
__snake_case : Union[str, Any] = 1
__snake_case : int = 15
# dummy input_ids and scores
__snake_case : Any = ids_tensor((batch_size, sequence_length) , __magic_name__ )
__snake_case : Optional[Any] = input_ids.copy()
__snake_case : Dict = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : Any = scores.copy()
# instantiate all dist processors
__snake_case : Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
__snake_case : str = FlaxTopKLogitsWarper(3 )
__snake_case : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__snake_case : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__magic_name__ )
__snake_case : List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__magic_name__ )
__snake_case : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=__magic_name__ , eos_token_id=__magic_name__ )
__snake_case : str = 10
# no processor list
def run_no_processor_list(__magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ):
__snake_case : Union[str, Any] = temp_dist_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : List[Any] = top_k_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : Union[str, Any] = top_p_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : str = min_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : str = bos_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : Optional[Any] = eos_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
return scores
# with processor list
def run_processor_list(__magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : Tuple ):
__snake_case : Any = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__snake_case : str = processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
return scores
__snake_case : Optional[int] = jax.jit(__magic_name__ )
__snake_case : Optional[int] = jax.jit(__magic_name__ )
__snake_case : Any = jitted_run_no_processor_list(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : Dict = jitted_run_processor_list(__magic_name__ , __magic_name__ , __magic_name__ )
# scores should be equal
self.assertTrue(jnp.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
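
# The invariant both processor-list tests above check, restated as a two-warper
# sketch: a FlaxLogitsProcessorList must be exactly equivalent to applying its
# members in order by hand (input_ids/scores/cur_len as in the tests).
warpers = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.5), FlaxTopKLogitsWarper(3)])
#   fused = warpers(input_ids, scores, cur_len=cur_len)
#   manual = FlaxTopKLogitsWarper(3)(
#       input_ids, FlaxTemperatureLogitsWarper(0.5)(input_ids, scores, cur_len=cur_len), cur_len=cur_len
#   )
#   jnp.allclose(fused, manual) should hold.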
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def is_automorphic_number(number: int) -> bool:
    """Return True if the square of `number` ends with `number` itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
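
# A few spot checks of the predicate above (an automorphic number's square ends
# in the number itself):
assert is_automorphic_number(0) is True    # 0**2 == 0
assert is_automorphic_number(5) is True    # 5**2 == 25
assert is_automorphic_number(76) is True   # 76**2 == 5776
assert is_automorphic_number(7) is False   # 7**2 == 49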
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
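
    # Shape sanity check for the windowing above: each sample holds `look_back`
    # consecutive scaled prices and each target the next `forward_days` prices
    # (flattened by ravel()), so x_train -> (n, 10, 1) and y_train -> (n, 5).
    print(x_train.shape, y_train.shape)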
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all example files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
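
# A quick illustration of the "init" replacement pattern above, as a commented
# sketch (the version strings are illustrative):
#
#   re_pattern, template = REPLACE_PATTERNS["init"]
#   re_pattern.sub(template.replace("VERSION", "4.31.0"), '__version__ = "4.31.0.dev0"\n')
#   # -> '__version__ = "4.31.0"\n'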
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
import faiss  # noqa: F401  # Here to have a nice missing dependency error message early on
import numpy  # noqa: F401  # Here to have a nice missing dependency error message early on
import requests  # noqa: F401  # Here to have a nice missing dependency error message early on
import sklearn  # noqa: F401  # Here to have a nice missing dependency error message early on
import tqdm  # noqa: F401  # Here to have a nice missing dependency error message early on
from mauve import compute_mauve  # From: mauve-text

import datasets


_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
  title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
  author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
  booktitle = {NeurIPS},
  year = {2021}
}
"""

_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.

MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.

For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (NeurIPS, 2021).

This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""

_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of references, one for each prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
    num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
    kmeans_max_iter: maximum number of k-means iterations. Default 500
    featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large'. Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
    max_text_length: maximum number of tokens to consider. Default 1024
    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
    mauve_scaling_factor: "c" from the paper. Default 5.
    verbose: If True (default), print running time updates
    seed: random seed to initialize k-means cluster assignments.
Returns:
    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
    q_hist: same as above, but with q_text.
Examples:

    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
    >>> import datasets
    >>> mauve = datasets.load_metric('mauve')
    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> out = mauve.compute(predictions=predictions, references=references)  # doctest: +SKIP
    >>> print(out.mauve)  # doctest: +SKIP
    1.0
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
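

# Minimal usage sketch (added for illustration, not part of the original metric file).
# It assumes the optional `mauve-text`, `faiss` and `scikit-learn` dependencies are
# installed and that a GPT-2 checkpoint can be downloaded for featurization.
if __name__ == "__main__":
    predictions = ["hello there", "general kenobi"]
    references = ["hello there", "general kenobi"]
    out = datasets.load_metric("mauve").compute(predictions=predictions, references=references)
    print(out.mauve)  # 1.0 when the two text distributions coincide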
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
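

# Minimal usage sketch (added for illustration, not part of the original file),
# assuming `transformers` is installed: build a default config and inspect the
# ConvBERT-specific convolution settings.
if __name__ == "__main__":
    config = ConvBertConfig()
    print(config.model_type, config.conv_kernel_size, config.head_ratio, config.num_groups)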
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes at the front of the list
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the payloads of two nodes identified by their data
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return

        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        # swap the stored data rather than relinking the nodes
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
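

# Standalone sanity check (added for illustration, not part of the original test file).
# It assumes the `microsoft/beit-base-patch16-224` checkpoint is reachable; index 281
# is the ImageNet class the image-classification test above asserts (the "tabby" cat).
if __name__ == "__main__":
    from transformers import BeitForImageClassification, BeitImageProcessor

    processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
    model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        prediction = model(**inputs).logits.argmax(-1).item()
    print(prediction)  # expected: 281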
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original model's weights to our DETA structure.
    """

    # load config
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase_ = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
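

# Example invocation (illustrative; it assumes the checkpoint repos referenced above
# are still reachable on the Hub, and the script filename is a placeholder):
#
#     python convert_deta_swin_to_pytorch.py \
#         --model_name deta-swin-large \
#         --pytorch_dump_folder_path ./deta-swin-large \
#         --push_to_hub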
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from copy import deepcopy


class FenwickTree:
    """
    Fenwick tree (binary indexed tree): O(log n) point updates and prefix/range sums.
    """

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
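

# Usage sketch (added for illustration, not in the original file): point updates
# plus O(log n) prefix/range sums over the array [0, 1, 2, 3, 4].
#
#     f = FenwickTree(arr=[0, 1, 2, 3, 4])
#     f.add(2, 10)          # underlying array becomes [0, 1, 12, 3, 4]
#     print(f.prefix(3))    # sum of indices 0..2 -> 13
#     print(f.query(1, 4))  # sum of indices 1..3 -> 16
#     print(f.get_array())  # recovers [0, 1, 12, 3, 4]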
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
a_ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
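

# Example invocation (illustrative; the script name and file paths below are
# placeholders, not taken from the original repository):
#
#     python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#         --pytorch_model_path efficientformer_l1_300d.pth \
#         --config_file efficientformer_l1_config.json \
#         --pytorch_dump_path ./efficientformer-l1 \
#         --no-push_to_hub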
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
random.seed(UpperCamelCase_ )
np.random.seed(UpperCamelCase_ )
torch.manual_seed(UpperCamelCase_ )
torch.cuda.manual_seed_all(UpperCamelCase_ )
# ^^ safe to call this function even if cuda is not available
class A__ :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case = 0.9999 , __snake_case = 0.0 , __snake_case = 0 , __snake_case = False , __snake_case = 1.0 , __snake_case = 2 / 3 , __snake_case = None , __snake_case = None , **__snake_case , ):
if isinstance(__snake_case , torch.nn.Module ):
snake_case = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , __snake_case , standard_warn=__snake_case , )
snake_case = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
snake_case = True
if kwargs.get('''max_value''' , __snake_case ) is not None:
snake_case = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , __snake_case , standard_warn=__snake_case )
snake_case = kwargs['''max_value''']
if kwargs.get('''min_value''' , __snake_case ) is not None:
snake_case = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , __snake_case , standard_warn=__snake_case )
snake_case = kwargs['''min_value''']
snake_case = list(__snake_case )
snake_case = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' , __snake_case ) is not None:
snake_case = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , __snake_case , standard_warn=__snake_case )
self.to(device=kwargs['''device'''] )
snake_case = None
snake_case = decay
snake_case = min_decay
snake_case = update_after_step
snake_case = use_ema_warmup
snake_case = inv_gamma
snake_case = power
snake_case = 0
snake_case = None # set in `step()`
snake_case = model_cls
snake_case = model_config
@classmethod
def a_ ( cls , __snake_case , __snake_case ):
snake_case , snake_case = model_cls.load_config(__snake_case , return_unused_kwargs=__snake_case )
snake_case = model_cls.from_pretrained(__snake_case )
snake_case = cls(model.parameters() , model_cls=__snake_case , model_config=model.config )
ema_model.load_state_dict(__snake_case )
return ema_model
def a_ ( self , __snake_case ):
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
snake_case = self.model_cls.from_config(self.model_config )
snake_case = self.state_dict()
state_dict.pop('''shadow_params''' , __snake_case )
model.register_to_config(**__snake_case )
self.copy_to(model.parameters() )
model.save_pretrained(__snake_case )
def a_ ( self , __snake_case ):
snake_case = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
snake_case = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
snake_case = (1 + step) / (1_0 + step)
snake_case = min(__snake_case , self.decay )
# make sure decay is not smaller than min_decay
snake_case = max(__snake_case , self.min_decay )
return cur_decay_value
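# Worked example of the schedule above, assuming the defaults (decay=0.9999,
# min_decay=0.0, update_after_step=0, use_ema_warmup=False): at optimization step 10
# the raw value is (1 + 9) / (10 + 9) ~= 0.526, at step 1000 it is 1000 / 1009 ~= 0.991,
# and from roughly step 90000 onward it is clamped to the 0.9999 ceiling.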
@torch.no_grad()
def a_ ( self , __snake_case ):
if isinstance(__snake_case , torch.nn.Module ):
snake_case = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , __snake_case , standard_warn=__snake_case , )
snake_case = parameters.parameters()
snake_case = list(__snake_case )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
snake_case = self.get_decay(self.optimization_step )
snake_case = decay
snake_case = 1 - decay
snake_case = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , __snake_case ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
snake_case = deepspeed.zero.GatheredParameters(__snake_case , modifier_rank=__snake_case )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__snake_case )
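# Note on the in-place update above: s_param.sub_(one_minus_decay * (s_param - param))
# is algebraically the standard EMA rule s <- decay * s + (1 - decay) * param,
# rearranged so the update is performed in place on the shadow parameter.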
def a_ ( self , __snake_case ):
snake_case = list(__snake_case )
for s_param, param in zip(self.shadow_params , __snake_case ):
param.data.copy_(s_param.to(param.device ).data )
def a_ ( self , __snake_case=None , __snake_case=None ):
snake_case = [
p.to(device=__snake_case , dtype=__snake_case ) if p.is_floating_point() else p.to(device=__snake_case )
for p in self.shadow_params
]
def a_ ( self ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def a_ ( self , __snake_case ):
snake_case = [param.detach().cpu().clone() for param in parameters]
def a_ ( self , __snake_case ):
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , __snake_case ):
param.data.copy_(c_param.data )
# Drop the stored copies afterwards to free memory.
snake_case = None
def a_ ( self , __snake_case ):
snake_case = copy.deepcopy(__snake_case )
snake_case = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
snake_case = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , __snake_case ):
raise ValueError('''Invalid min_decay''' )
snake_case = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , __snake_case ):
raise ValueError('''Invalid optimization_step''' )
snake_case = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , __snake_case ):
raise ValueError('''Invalid update_after_step''' )
snake_case = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , __snake_case ):
raise ValueError('''Invalid use_ema_warmup''' )
snake_case = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
snake_case = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
snake_case = state_dict.get('''shadow_params''' , __snake_case )
if shadow_params is not None:
snake_case = shadow_params
if not isinstance(self.shadow_params , __snake_case ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(__snake_case , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
| 213
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = UnCLIPImageVariationPipeline
__magic_name__ = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
__magic_name__ = IMAGE_VARIATION_BATCH_PARAMS
__magic_name__ = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
__magic_name__ = False
@property
def a_ ( self ):
return 3_2
@property
def a_ ( self ):
return 3_2
@property
def a_ ( self ):
return self.time_input_dim
@property
def a_ ( self ):
return self.time_input_dim * 4
@property
def a_ ( self ):
return 1_0_0
@property
def a_ ( self ):
snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(__snake_case )
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , )
return CLIPVisionModelWithProjection(__snake_case )
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
snake_case = UnCLIPTextProjModel(**__snake_case )
return model
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = {
'''sample_size''': 3_2,
# RGB in channels
'''in_channels''': 3,
# Out channels is double the in channels because the model predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
snake_case = UNetaDConditionModel(**__snake_case )
return model
@property
def a_ ( self ):
return {
"sample_size": 6_4,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def a_ ( self ):
# seeded differently to get a different unet from `self.dummy_super_res_first`
torch.manual_seed(1 )
snake_case = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def a_ ( self ):
snake_case = self.dummy_decoder
snake_case = self.dummy_text_proj
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_super_res_first
snake_case = self.dummy_super_res_last
snake_case = UnCLIPScheduler(
variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=1_0_0_0 , )
snake_case = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=1_0_0_0 , )
snake_case = CLIPImageProcessor(crop_size=3_2 , size=3_2 )
snake_case = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def a_ ( self , __snake_case , __snake_case=0 , __snake_case=True ):
snake_case = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith('''mps''' ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
if pil_image:
snake_case = input_image * 0.5 + 0.5
snake_case = input_image.clamp(0 , 1 )
snake_case = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
snake_case = DiffusionPipeline.numpy_to_pil(__snake_case )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def a_ ( self ):
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(**__snake_case )
snake_case = output.images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(
**__snake_case , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self ):
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(**__snake_case )
snake_case = output.images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(
**__snake_case , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self ):
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
snake_case = pipe(**__snake_case )
snake_case = output.images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
snake_case = pipe(
**__snake_case , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 6_4, 6_4, 3)
snake_case = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self ):
snake_case = torch.device('''cpu''' )
class A__ :
"""simple docstring"""
__magic_name__ = 1
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device=__snake_case ).manual_seed(0 )
snake_case = pipe.decoder.dtype
snake_case = 1
snake_case = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
snake_case = pipe.prepare_latents(
__snake_case , dtype=__snake_case , device=__snake_case , generator=__snake_case , latents=__snake_case , scheduler=DummyScheduler() )
snake_case = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
snake_case = pipe.prepare_latents(
__snake_case , dtype=__snake_case , device=__snake_case , generator=__snake_case , latents=__snake_case , scheduler=DummyScheduler() )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(
**__snake_case , decoder_latents=__snake_case , super_res_latents=__snake_case ).images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
# Don't pass image, instead pass embedding
snake_case = pipeline_inputs.pop('''image''' )
snake_case = pipe.image_encoder(__snake_case ).image_embeds
snake_case = pipe(
**__snake_case , decoder_latents=__snake_case , super_res_latents=__snake_case , image_embeddings=__snake_case , ).images
# make sure passing image embeddings manually gives identical results
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def a_ ( self ):
snake_case = torch_device == '''cpu'''
# Check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
snake_case = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=__snake_case , expected_max_diff=__snake_case )
@skip_mps
def a_ ( self ):
snake_case = torch_device == '''cpu'''
snake_case = True
snake_case = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=__snake_case , relax_max_difference=__snake_case , additional_params_copy_to_batched_inputs=__snake_case , )
def a_ ( self ):
snake_case = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
snake_case = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__snake_case , additional_params_copy_to_batched_inputs=__snake_case , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__snake_case )
@skip_mps
def a_ ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a_ ( self ):
return super().test_save_load_local()
@skip_mps
def a_ ( self ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def a_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self ):
snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
snake_case = UnCLIPImageVariationPipeline.from_pretrained(
'''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa )
snake_case = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
snake_case = pipeline(
__snake_case , generator=__snake_case , output_type='''np''' , )
snake_case = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert_mean_pixel_difference(__snake_case , __snake_case , 1_5 )
| 213
| 1
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase ( __a ):
def __init__( self , *_a , _a=None , _a=None , **_a ) -> Union[str, Any]:
super().__init__(*_a , **_a )
_A : Union[str, Any] = eval_examples
_A : str = post_process_function
def a__ ( self , _a=None , _a=None , _a=None , _a = "eval" ) -> str:
_A : List[str] = self.eval_dataset if eval_dataset is None else eval_dataset
_A : Optional[Any] = self.get_eval_dataloader(_a )
_A : List[str] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation; we will do it in the loop here.
_A : Union[str, Any] = self.compute_metrics
_A : Optional[int] = None
_A : Union[str, Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_A : Optional[Any] = time.time()
try:
_A : int = eval_loop(
_a , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , )
finally:
_A : Union[str, Any] = compute_metrics
_A : Tuple = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
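# (Shifting start_time forward by the JIT compilation time excludes one-off
# compilation overhead from the samples/second figures computed below.)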
output.metrics.update(
speed_metrics(
_a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node writes the results by default
_A : Any = self.post_process_function(_a , _a , output.predictions )
_A : List[Any] = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_A : List[str] = metrics.pop(_a )
metrics.update(output.metrics )
else:
_A : Dict = output.metrics
if self.args.should_log:
# Only the main node logs the results by default
self.log(_a )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_A : str = self.callback_handler.on_evaluate(self.args , self.state , self.control , _a )
return metrics
def a__ ( self , _a , _a , _a=None , _a = "test" ) -> int:
_A : List[str] = self.get_test_dataloader(_a )
# Temporarily disable metric computation; we will do it in the loop here.
_A : Optional[Any] = self.compute_metrics
_A : Optional[int] = None
_A : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_A : Dict = time.time()
try:
_A : int = eval_loop(
_a , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , )
finally:
_A : int = compute_metrics
_A : Optional[Any] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_A : List[Any] = self.post_process_function(_a , _a , output.predictions , """predict""" )
_A : Optional[Any] = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_A : List[Any] = metrics.pop(_a )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_a )
| 26
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
A =logging.getLogger(__name__)
A ='Hello world! cécé herlolip'
A =namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def snake_case_ (_a : List[Any] , _a : Any ):
UpperCAmelCase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=_a , large=_a , share_emb=_a , use_bert_emb=_a , encoder='''bert''' , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
UpperCAmelCase = torch.load(_a , lambda storage , loc : storage )  # map_location keeps tensors on CPU
UpperCAmelCase = AbsSummarizer(_a , torch.device('''cpu''' ) , _a )
original.eval()
UpperCAmelCase = BertAbsSummarizer(_a , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_a )) )
UpperCAmelCase = torch.tensor(_a ).unsqueeze(0 )
UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_a )) )
UpperCAmelCase = torch.tensor(_a ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
UpperCAmelCase = encoder_input_ids
UpperCAmelCase = decoder_input_ids
UpperCAmelCase = UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = UpperCAmelCase = None
UpperCAmelCase = UpperCAmelCase = None
UpperCAmelCase = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
UpperCAmelCase = original(_a , _a , _a , _a , _a , _a , _a )[0]
UpperCAmelCase = original.generator(_a )
UpperCAmelCase = new_model(
_a , _a , _a , _a , _a )[0]
UpperCAmelCase = new_model.generator(_a )
UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print('''Maximum absolute difference between model outputs: {:.2f}'''.format(_a ) )
UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(_a ) )
UpperCAmelCase = torch.allclose(_a , _a , atol=1E-3 )
if are_identical:
logging.info('''all outputs are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
A =argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
A =parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 34
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase_ (metaclass=a__ ):
"""simple docstring"""
_lowerCAmelCase = ['onnx']
def __init__( self : List[Any] , *_lowerCamelCase : List[Any] , **_lowerCamelCase : List[str] ):
"""simple docstring"""
requires_backends(self , ['''onnx'''] )
@classmethod
def _a ( cls : str , *_lowerCamelCase : Tuple , **_lowerCamelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''onnx'''] )
@classmethod
def _a ( cls : Optional[Any] , *_lowerCamelCase : str , **_lowerCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''onnx'''] )
| 4
|
'''simple docstring'''
import heapq
def snake_case__ ( lowerCamelCase__ : dict ) -> set[int]:
A_ : list[list] = []
# for each node and its adjacency list, push them together with the node's rank onto the queue
# using the heapq module, the queue is filled like a priority queue
# heapq implements a min-heap, so -1 * len(v) is used to simulate a max-heap
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowerCamelCase__ , [-1 * len(lowerCamelCase__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
A_ : str = set()
# while the queue isn't empty and there are still edges
# (queue[0][0] is the rank of the highest-rank node; 0 means no edges remain)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
A_ : Tuple = heapq.heappop(lowerCamelCase__ )[1][0]
chosen_vertices.add(lowerCamelCase__ )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v has no adjacent nodes, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update its rank
if argmax in elem[1][1]:
A_ : List[str] = elem[1][1].index(lowerCamelCase__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowerCamelCase__ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
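# Expected output for the sample graph above (traced by hand, given heapq's
# tie-breaking): the loop picks vertices 2, 0, 1 and 4, printing {0, 1, 2, 4}.
# Despite the name, this greedy heuristic is an approximation and is not
# guaranteed to return a minimum vertex cover.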
| 4
| 1
|
def lowerCAmelCase_ ( __a ) -> list[int]:
"""simple docstring"""
lowerCamelCase__: List[str] =len(__a )
for i in range(__a ):
for j in range(i + 1 , __a ):
if numbers[j] < numbers[i]:
lowerCamelCase__ , lowerCamelCase__: Optional[Any] =numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
__A = input("Enter numbers separated by a comma:\n").strip()
__A = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
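# Example session (hypothetical input): entering "3,1,2" prints [1, 2, 3].
# Exchange sort compares every pair (i, j) with j > i and swaps out-of-order
# elements, giving O(n^2) comparisons, similar to selection sort.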
| 10
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = "▁"
__A = {"vocab_file": "prophetnet.tokenizer"}
__A = {
"vocab_file": {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
),
}
}
__A = {
"microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
__A = {
"microsoft/xprophetnet-large-wiki100-cased": 512,
}
def lowerCAmelCase_ ( __a ) -> int:
"""simple docstring"""
lowerCamelCase__: Optional[Any] =collections.OrderedDict()
with open(__a , "r" , encoding="utf-8" ) as reader:
lowerCamelCase__: int =reader.readlines()
for index, token in enumerate(__a ):
lowerCamelCase__: List[str] =token.rstrip("\n" )
lowerCamelCase__: List[Any] =index
return vocab
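# A sketch of the plain-text vocab format this loader assumes: one token per line,
# with the zero-based line index becoming the token id, e.g.
#   [PAD]  -> 0
#   [CLS]  -> 1
#   hello  -> 2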
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "attention_mask"]
def __init__(self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]="[SEP]" , UpperCAmelCase_ : List[Any]="[SEP]" , UpperCAmelCase_ : Optional[Any]="[SEP]" , UpperCAmelCase_ : int="[UNK]" , UpperCAmelCase_ : Optional[Any]="[PAD]" , UpperCAmelCase_ : Dict="[CLS]" , UpperCAmelCase_ : Dict="[MASK]" , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Tuple , ) ->None:
'''simple docstring'''
lowerCamelCase__: int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
lowerCamelCase__: Optional[int] =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(UpperCAmelCase_))
lowerCamelCase__: Optional[int] =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowerCamelCase__: Optional[int] ={"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10):
lowerCamelCase__: Optional[int] =F"""[unused{i}]"""
lowerCamelCase__: int =5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
lowerCamelCase__: int =12
lowerCamelCase__: Optional[Any] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(UpperCAmelCase_)
def __getstate__(self : List[str]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.__dict__.copy()
lowerCamelCase__: Dict =None
return state
def __setstate__(self : List[str] , UpperCAmelCase_ : Union[str, Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Tuple =d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
lowerCamelCase__: Dict ={}
lowerCamelCase__: Tuple =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return ([0] * len(UpperCAmelCase_)) + [1]
return ([0] * len(UpperCAmelCase_)) + [1] + ([0] * len(UpperCAmelCase_)) + [1]
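# Example of the mask layout returned above (hypothetical lengths): a single
# sequence of 3 tokens yields [0, 0, 0, 1], and a pair of 3 and 2 tokens yields
# [0, 0, 0, 1, 0, 0, 1], where each 1 marks an appended [SEP] special token.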
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
lowerCamelCase__: Any =[self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def SCREAMING_SNAKE_CASE_ (self : str) ->Dict:
'''simple docstring'''
return len(self.sp_model) + self.fairseq_offset
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: str ={self.convert_ids_to_tokens(UpperCAmelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : str) ->str:
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : List[Any]) ->str:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase__: str =self.sp_model.PieceToId(UpperCAmelCase_)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Optional[Any]) ->Optional[int]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] ="".join(UpperCAmelCase_).replace(UpperCAmelCase_ , " ").strip()
return out_string
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase_):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
lowerCamelCase__: List[str] =os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCAmelCase_)
elif not os.path.isfile(self.vocab_file):
with open(UpperCAmelCase_ , "wb") as fi:
lowerCamelCase__: Dict =self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_)
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
lowerCamelCase__: Union[str, Any] =[self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
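# Sequence layout produced above: a single sequence becomes "A [SEP]" and a pair
# becomes "A [SEP] B [SEP]" (no leading [CLS]), matching the special-tokens mask
# logic earlier in this class.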
| 10
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class lowercase_ ( __lowercase ):
UpperCamelCase_ : Optional[int] = "ibert"
def __init__( self : Optional[Any] , A__ : List[Any]=30522 , A__ : Optional[int]=768 , A__ : List[str]=12 , A__ : Tuple=12 , A__ : Optional[int]=3072 , A__ : List[Any]="gelu" , A__ : int=0.1 , A__ : Optional[int]=0.1 , A__ : Optional[int]=512 , A__ : List[Any]=2 , A__ : Optional[int]=0.02 , A__ : List[str]=1e-12 , A__ : Optional[int]=1 , A__ : List[str]=0 , A__ : Optional[Any]=2 , A__ : Union[str, Any]="absolute" , A__ : Union[str, Any]=False , A__ : List[str]="none" , **A__ : int , ) -> Union[str, Any]:
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ )
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = hidden_act
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = position_embedding_type
_snake_case = quant_mode
_snake_case = force_dequant
class lowercase_ ( __lowercase ):
@property
def UpperCamelCase_ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_snake_case = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_snake_case = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 278
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 278
| 1
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: str = jnp.ones((batch_size, length)) / length
return scores
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Dict = None
SCREAMING_SNAKE_CASE_: str = 20
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(batch_size=2 , length=lowerCAmelCase__)
# tweak scores to not be uniform anymore
SCREAMING_SNAKE_CASE_: List[str] = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch
SCREAMING_SNAKE_CASE_: Any = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch
# compute softmax
SCREAMING_SNAKE_CASE_: Dict = jax.nn.softmax(lowerCAmelCase__ , axis=-1)
SCREAMING_SNAKE_CASE_: Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: List[str] = FlaxTemperatureLogitsWarper(temperature=1.3)
SCREAMING_SNAKE_CASE_: str = jax.nn.softmax(temp_dist_warper_sharper(lowerCAmelCase__ , scores.copy() , cur_len=lowerCAmelCase__) , axis=-1)
SCREAMING_SNAKE_CASE_: int = jax.nn.softmax(temp_dist_warper_smoother(lowerCAmelCase__ , scores.copy() , cur_len=lowerCAmelCase__) , axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3))
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: List[str] = None
SCREAMING_SNAKE_CASE_: str = 10
SCREAMING_SNAKE_CASE_: Tuple = 2
# create ramp distribution
SCREAMING_SNAKE_CASE_: Optional[Any] = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, vocab_size)).copy()
SCREAMING_SNAKE_CASE_: Dict = ramp_logits[1:, : vocab_size // 2] + vocab_size
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Dict = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
# check special case
SCREAMING_SNAKE_CASE_: Any = 5
SCREAMING_SNAKE_CASE_: str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
SCREAMING_SNAKE_CASE_: Any = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, length)).copy()
SCREAMING_SNAKE_CASE_: Any = top_k_warp_safety_check(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Tuple = None
SCREAMING_SNAKE_CASE_: Dict = 10
SCREAMING_SNAKE_CASE_: Dict = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
SCREAMING_SNAKE_CASE_: Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
SCREAMING_SNAKE_CASE_: int = FlaxTopPLogitsWarper(0.8)
SCREAMING_SNAKE_CASE_: Optional[Any] = np.exp(top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__))
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
SCREAMING_SNAKE_CASE_: Dict = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# check edge cases with negative and extreme logits
SCREAMING_SNAKE_CASE_: Union[str, Any] = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
SCREAMING_SNAKE_CASE_: Dict = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
SCREAMING_SNAKE_CASE_: str = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
SCREAMING_SNAKE_CASE_: Any = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Tuple = 20
SCREAMING_SNAKE_CASE_: List[str] = 4
SCREAMING_SNAKE_CASE_: Optional[int] = 0
SCREAMING_SNAKE_CASE_: str = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
# check that min length is applied at length 5
SCREAMING_SNAKE_CASE_: str = ids_tensor((batch_size, 20) , vocab_size=20)
SCREAMING_SNAKE_CASE_: int = 5
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf")])
# check that min length is not applied anymore at length 15
SCREAMING_SNAKE_CASE_: List[str] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = 15
SCREAMING_SNAKE_CASE_: Any = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: int = 20
SCREAMING_SNAKE_CASE_: str = 4
SCREAMING_SNAKE_CASE_: List[Any] = 0
SCREAMING_SNAKE_CASE_: Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
# check that all scores are -inf except the bos_token_id score
SCREAMING_SNAKE_CASE_: int = ids_tensor((batch_size, 1) , vocab_size=20)
SCREAMING_SNAKE_CASE_: List[str] = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
SCREAMING_SNAKE_CASE_: List[Any] = 3
SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Any = 20
SCREAMING_SNAKE_CASE_: Optional[Any] = 4
SCREAMING_SNAKE_CASE_: Dict = 0
SCREAMING_SNAKE_CASE_: List[Any] = 5
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
# check that all scores are -inf except the eos_token_id when max_length is reached
SCREAMING_SNAKE_CASE_: List[Any] = ids_tensor((batch_size, 4) , vocab_size=20)
SCREAMING_SNAKE_CASE_: Optional[int] = 4
SCREAMING_SNAKE_CASE_: Dict = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
SCREAMING_SNAKE_CASE_: List[str] = 3
SCREAMING_SNAKE_CASE_: str = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: int = 4
SCREAMING_SNAKE_CASE_: List[Any] = 10
SCREAMING_SNAKE_CASE_: int = 15
SCREAMING_SNAKE_CASE_: Dict = 2
SCREAMING_SNAKE_CASE_: int = 1
SCREAMING_SNAKE_CASE_: List[Any] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_: int = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = input_ids.copy()
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: Tuple = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = 10
# no processor list
SCREAMING_SNAKE_CASE_: Dict = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# with processor list
SCREAMING_SNAKE_CASE_: str = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_: Tuple = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[int] = 4
SCREAMING_SNAKE_CASE_: int = 10
SCREAMING_SNAKE_CASE_: List[str] = 15
SCREAMING_SNAKE_CASE_: List[Any] = 2
SCREAMING_SNAKE_CASE_: Union[str, Any] = 1
SCREAMING_SNAKE_CASE_: str = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_: Tuple = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = input_ids.copy()
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_: Dict = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Dict = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_: int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = 10
# no processor list
def run_no_processor_list(lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Any = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
return scores
# with processor list
def run_processor_list(lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[str] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_: Dict = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
return scores
SCREAMING_SNAKE_CASE_: str = jax.jit(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jax.jit(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = jitted_run_no_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = jitted_run_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
| 13
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = XGLMTokenizer
_UpperCAmelCase : List[Any] = XGLMTokenizerFast
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Tuple = True
def _SCREAMING_SNAKE_CASE ( self : Tuple):
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_: List[Any] = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = "<pad>"
SCREAMING_SNAKE_CASE_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[int] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<s>")
self.assertEqual(vocab_keys[1] , "<pad>")
self.assertEqual(len(lowerCAmelCase__) , 1008)
def _SCREAMING_SNAKE_CASE ( self : Any):
self.assertEqual(self.get_tokenizer().vocab_size , 1008)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Optional[int] = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 3_1227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False, )
| 13
| 1
|
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        # Grow a minimum spanning subgraph greedily from an arbitrary start vertex.
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    script_directory = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_directory, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]
    for edgea in range(1, len(adjacency_matrix)):
        for edgeb in range(edgea):
            if adjacency_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjacency_matrix[edgea][edgeb])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
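# Illustrative usage (added; not part of the Project Euler solution, which reads
# its network from p107_network.txt): a tiny hand-built graph shows the saving.
# >>> g = Graph({0, 1, 2}, {(0, 1): 5, (1, 2): 3, (0, 2): 10})
# >>> mst = g.prims_algorithm()
# >>> sum(g.edges.values()) - sum(mst.edges.values())
# 10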
| 243
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10, )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return vqvae, unet
@slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 243
| 1
|
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    # Swap two elements if they are out of order for the given direction
    # (1 = ascending, 0 = descending).
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
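# Note (added for illustration): bitonic sort assumes the slice length is a
# power of two. A minimal example:
# >>> data = [12, 42, -21, 17]
# >>> bitonic_sort(data, 0, len(data), 1)
# >>> data
# [-21, 12, 17, 42]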
| 321
|
'''simple docstring'''
def solution(n: int = 2000000) -> int:
    # Sieve of Eratosthenes: 0 marks a prime candidate, 1 marks a composite.
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f'{solution() = }')
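# Quick sanity check (illustrative): the primes below 10 are 2, 3, 5 and 7,
# so solution(10) returns 2 + 3 + 5 + 7 == 17.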
| 321
| 1
|
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator')

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f'''Half Adder Output Qubit Counts: {counts}''')
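# Expected behaviour (illustrative): for inputs (1, 1) the XOR (sum) bit is 0
# and the AND (carry) bit is 1, so every one of the 1000 shots lands on the
# bitstring "10" (classical bit 1 = carry, bit 0 = sum), i.e. counts == {'10': 1000}.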
| 181
|
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
def primitive_root(p_val: int) -> int:
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(F"""{name}_pubkey.txt""") or os.path.exists(F"""{name}_privkey.txt"""):
        print('\nWARNING:')
        print(
            F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            'Use a different name or delete these files and re-run this program.')
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(F"""\nWriting public key to file {name}_pubkey.txt...""")
    with open(F"""{name}_pubkey.txt""", 'w') as fo:
        fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""")
    print(F"""Writing private key to file {name}_privkey.txt...""")
    with open(F"""{name}_privkey.txt""", 'w') as fo:
        fo.write(F"""{private_key[0]},{private_key[1]}""")


def main() -> None:
    print('Making key files...')
    make_key_files('elgamal', 2048)
    print('Key files generation successful')
print('Key files generation successful' )
if __name__ == "__main__":
main()
| 181
| 1
|
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description='Train a masked language model on TPU.')
    parser.add_argument(
        '--pretrained_model_config', type=str, default='roberta-base', help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!', )
    parser.add_argument(
        '--tokenizer', type=str, default='unigram-tokenizer-wikitext', help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.', )
    parser.add_argument(
        '--per_replica_batch_size', type=int, default=8, help='Batch size per TPU core.', )
    parser.add_argument(
        '--no_tpu', action='store_true', help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.', )
    parser.add_argument(
        '--tpu_name', type=str, help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.', default='local', )
    parser.add_argument(
        '--tpu_zone', type=str, help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.', )
    parser.add_argument(
        '--gcp_project', type=str, help='Google cloud project name. Only used for non-Colab TPU nodes.')
    parser.add_argument(
        '--bfloat16', action='store_true', help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.', )
    parser.add_argument(
        '--train_dataset', type=str, help='Path to training dataset to load. If the path begins with `gs://`'
        ' then the dataset will be loaded from a Google Cloud Storage bucket.', )
    parser.add_argument(
        '--shuffle_buffer_size', type=int, default=2**18, help='Size of the shuffle buffer (in samples)', )
    parser.add_argument(
        '--eval_dataset', type=str, help='Path to evaluation dataset to load. If the path begins with `gs://`'
        ' then the dataset will be loaded from a Google Cloud Storage bucket.', )
    parser.add_argument(
        '--num_epochs', type=int, default=1, help='Number of epochs to train for.', )
    parser.add_argument(
        '--learning_rate', type=float, default=1e-4, help='Learning rate to use for training.', )
    parser.add_argument(
        '--weight_decay_rate', type=float, default=1e-3, help='Weight decay rate to use for training.', )
    parser.add_argument(
        '--max_length', type=int, default=512, help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py', )
    parser.add_argument(
        '--mlm_probability', type=float, default=0.15, help='Fraction of tokens to mask during training.', )
    parser.add_argument('--output_dir', type=str, required=True, help='Path to save model checkpoints to.')
    parser.add_argument('--hub_model_id', type=str, help='Model ID to upload to on the Hugging Face Hub.')

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project)
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '
            '--gcp_project. When running on a TPU VM, use --tpu_name local.')

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split('/')[-1]
        sample_count = re.search(r'-\d+-(\d+)\.tfrecord', filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device='/gpu:0')

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, '*.tfrecord'))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, '*.tfrecord'))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=['accuracy'])

    def decode_fn(example):
        features = {
            'input_ids': tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            'attention_mask': tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors='tf')

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch['attention_mask'], tf.bool)
            | (batch['input_ids'] == tokenizer.cls_token_id)
            | (batch['input_ids'] == tokenizer.sep_token_id)
        )
        batch['input_ids'], batch['labels'] = data_collator.tf_mask_tokens(
            batch['input_ids'], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )
    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
| 47
|
'''simple docstring'''
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("""doctest""").testmod()
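# Illustrative checks (added, since the stripped docstring carried no tests):
# >>> sin(90.0)
# 1.0
# >>> sin(30.0)
# 0.5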
| 174
| 0
|
'''simple docstring'''
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100) -> tuple[float, np.ndarray]:
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 299
|
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if not scores:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height), minimax(depth + 1, node_index * 2 + 1, False, scores, height), )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height), minimax(depth + 1, node_index * 2 + 1, True, scores, height), )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    height = math.log(len(scores), 2)
    print(F"""Optimal value : {minimax(0, 0, True, scores, height)}""")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
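# Worked example (added for clarity): the eight leaf scores pair up under the
# maximising level as max(90, 23)=90, max(6, 33)=33, max(21, 65)=65 and
# max(123, 34423)=34423; the minimising level keeps min(90, 33)=33 and
# min(65, 34423)=65; the maximising root then picks max(33, 65)=65, so main()
# prints "Optimal value : 65".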
| 299
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nezha'] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 110
|
'''simple docstring'''
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5))
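# For reference (illustrative, values rounded): maclaurin_sin(10) ~ -0.544021
# and maclaurin_cos(5) ~ 0.283662, in line with math.sin(10) and math.cos(5).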
| 80
| 0
|
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
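# The three checks above assume a singly linked list node exposing `val` and
# `next`; no node class ships with this file, so here is a minimal assumed
# definition plus a usage sketch:
class ListNode:
    def __init__(self, val=0, next_node=None):
        self.val = val
        self.next = next_node


if __name__ == "__main__":
    # 1 -> 2 -> 1 reads the same in both directions; 1 -> 2 does not.
    palindrome = ListNode(1, ListNode(2, ListNode(1)))
    not_palindrome = ListNode(1, ListNode(2))
    print(is_palindrome_stack(palindrome))      # True
    print(is_palindrome_stack(not_palindrome))  # False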
| 29
|
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    # Scrape the headline COVID-19 counters and panel statistics from the page.
    soup = BeautifulSoup(requests.get(url).text, """html.parser""")
    keys = soup.findAll("""h1""")
    values = soup.findAll("""div""", {"""class""": """maincounter-number"""})
    keys += soup.findAll("""span""", {"""class""": """panel-title"""})
    values += soup.findAll("""div""", {"""class""": """number-table-main"""})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covid19_stats().items():
        print(f"""{key}\n{value}\n""")
| 29
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype')

        self.post_init()

    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError('llm_int8_threshold must be a float')

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError('llm_int8_skip_modules must be a list of strings')
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean')

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean')

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype')

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError('bnb_4bit_quant_type must be a string')

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean')

        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes')) >= version.parse(
            '0.39.0'):
            raise ValueError(
                '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version')

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, 'w', encoding='utf-8') as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '\n'
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['bnb_4bit_compute_dtype'] = str(output['bnb_4bit_compute_dtype']).split('.')[1]
        return output

    def __repr__(self):
        return f'''{self.__class__.__name__} {self.to_json_string()}'''

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
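# Minimal usage sketch (illustrative; flag names follow the repaired __init__
# above, and the 4-bit path requires bitsandbytes>=0.39.0 at validation time):
# config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype="bfloat16")
# config.quantization_method()  # -> "nf4"
# config.to_json_file("quant_config.json")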
| 53
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timesformer'] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 53
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
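# Usage sketch (illustrative): instantiate the template and inspect the mapping
# used to rename dataset columns.
# >>> QuestionAnsweringExtractive().column_mapping
# {'question': 'question', 'context': 'context', 'answers': 'answers'}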
| 61
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = """pix2struct_text_model"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """hidden_size""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs, )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""") == "pix2struct":
            config_dict = config_dict["""text_config"""]

        if "model_type" in config_dict and hasattr(cls, """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = """pix2struct_vision_model"""

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""") == "pix2struct":
            config_dict = config_dict["""vision_config"""]

        if "model_type" in config_dict and hasattr(cls, """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = """pix2struct"""
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""")
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
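# Composition sketch (illustrative): the composite config accepts keyword dicts
# for its sub-configs, or the two config objects directly.
# config = Pix2StructConfig(text_config={"num_layers": 2}, vision_config={"num_hidden_layers": 2})
# same = Pix2StructConfig.from_text_vision_configs(config.text_config, config.vision_config)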
| 61
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ['onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['onnx'])
| 4
|
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/blenderbot_small-90M""": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, ), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs, )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 4
| 1
|
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = TFViTMAEModel(config=config)
        result = model(pixel_values , training=False)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining( self , config , pixel_values , labels ):
        """simple docstring"""
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values , training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values , training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self: Any) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds")
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[str]:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : int = model_class(_SCREAMING_SNAKE_CASE)
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer))
__lowerCAmelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , tf.keras.layers.Layer))
def _SCREAMING_SNAKE_CASE ( self: Any) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : List[str] = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : str = [*signature.parameters.keys()]
__lowerCAmelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[Any]:
"""simple docstring"""
np.random.seed(2)
__lowerCAmelCase , __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2)
__lowerCAmelCase : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
__lowerCAmelCase : Dict = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = copy.deepcopy(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE))
__lowerCAmelCase : List[str] = model(**_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = outputs_dict[0].numpy()
__lowerCAmelCase : int = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords)) , 1e-6)
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Tuple:
"""simple docstring"""
np.random.seed(2)
__lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : str = int((config.image_size // config.patch_size) ** 2)
__lowerCAmelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
def prepare_numpy_arrays(_SCREAMING_SNAKE_CASE: Union[str, Any]):
__lowerCAmelCase : Optional[Any] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : List[str] = v.numpy()
else:
__lowerCAmelCase : Optional[int] = np.array(_SCREAMING_SNAKE_CASE)
return inputs_np_dict
for model_class in self.all_model_classes:
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = prepare_numpy_arrays(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE)
self.assert_outputs_same(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple) -> Union[str, Any]:
"""simple docstring"""
np.random.seed(2)
__lowerCAmelCase : Optional[Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
__lowerCAmelCase : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
__lowerCAmelCase : int = tf.constant(_SCREAMING_SNAKE_CASE)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__lowerCAmelCase : Tuple = tf_noise
super().check_pt_tf_models(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any:
"""simple docstring"""
np.random.seed(2)
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : List[Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(_SCREAMING_SNAKE_CASE)
if module_member_name.endswith("MainLayer")
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
for module_member in (getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE),)
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_SCREAMING_SNAKE_CASE , "_keras_serializable" , _SCREAMING_SNAKE_CASE)
}
__lowerCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2)
__lowerCAmelCase : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
__lowerCAmelCase : Union[str, Any] = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE)
inputs_dict.update({"noise": noise})
for main_layer_class in tf_main_layer_classes:
__lowerCAmelCase : List[Any] = main_layer_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
__lowerCAmelCase : List[Any] = tf.keras.Model(_SCREAMING_SNAKE_CASE , outputs=main_layer(_SCREAMING_SNAKE_CASE))
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE)
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , "keras_model.h5")
model.save(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = tf.keras.models.load_model(
_SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class})
assert isinstance(_SCREAMING_SNAKE_CASE , tf.keras.Model)
__lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE)
self.assert_outputs_same(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
@slow
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int:
"""simple docstring"""
np.random.seed(2)
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : str = int((config.image_size // config.patch_size) ** 2)
__lowerCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE)
if model_class.__name__ == "TFViTMAEModel":
__lowerCAmelCase : Optional[Any] = outputs.last_hidden_state.numpy()
__lowerCAmelCase : Optional[int] = 0
else:
__lowerCAmelCase : Dict = outputs.logits.numpy()
__lowerCAmelCase : Optional[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = model_class.from_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE)
if model_class.__name__ == "TFViTMAEModel":
__lowerCAmelCase : Optional[int] = after_outputs["last_hidden_state"].numpy()
__lowerCAmelCase : int = 0
else:
__lowerCAmelCase : Union[str, Any] = after_outputs["logits"].numpy()
__lowerCAmelCase : List[str] = 0
__lowerCAmelCase : Dict = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-5)
def _SCREAMING_SNAKE_CASE ( self: int) -> List[Any]:
"""simple docstring"""
np.random.seed(2)
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2)
__lowerCAmelCase : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
__lowerCAmelCase : Optional[Any] = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = model_class.from_config(model.get_config())
# make sure it also accepts a normal config
__lowerCAmelCase : Tuple = model_class.from_config(model.config)
__lowerCAmelCase : Any = new_model(_SCREAMING_SNAKE_CASE) # Build model
new_model.set_weights(model.get_weights())
__lowerCAmelCase : Union[str, Any] = new_model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE)
self.assert_outputs_same(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.")
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
def _SCREAMING_SNAKE_CASE ( self: str) -> Any:
"""simple docstring"""
pass
@slow
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
self.assertIsNotNone(_SCREAMING_SNAKE_CASE)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
    @slow
    def test_inference_for_pretraining( self ):
        """simple docstring"""
        # make the random mask reproducible across runs
        np.random.seed(2)
        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf")
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        outputs = model(**inputs , noise=noise)
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , expected_slice , atol=1e-4)
| 58
|
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class A__ ( unittest.TestCase ):
'''simple docstring'''
    @property
    def gpu_provider( self ):
        """simple docstring"""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self ):
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inpainting( self ):
        """simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 58
| 1
|
"""simple docstring"""
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
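# How the one-liner above works (explanatory note, not in the original):
# the string contains a %r placeholder and an escaped %%; formatting the string
# with itself (quine % quine) substitutes the string's own repr() into the
# placeholder, so the printed text is a program that prints itself when run.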
| 96
|
import math
def proth(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number , int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 1:
        msg = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Each "block" doubles in length and adds 2 ** (block + 1) to earlier entries.
        block_index = int(math.log(number // 3 , 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
    return proth_list[number - 1]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    for number in range(1_1):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"""ValueError: there is no {number}th Proth number""")
            continue
        print(f"""The {number}th Proth number: {value}""")
| 129
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    '''simple docstring'''
    model_type = "mobilenet_v1"
    def __init__( self ,num_channels=3 ,image_size=224 ,depth_multiplier=1.0 ,min_depth=8 ,hidden_act="relu6" ,tf_padding=True ,classifier_dropout_prob=0.999 ,initializer_range=0.02 ,layer_norm_eps=0.001 ,**kwargs ,) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "image-classification":
            return OrderedDict([('''logits''', {0: '''batch'''})] )
        else:
            return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
    @property
    def atol_for_validation( self ) -> float:
        """simple docstring"""
        return 1e-4
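# Minimal usage sketch (illustrative, not part of the original module):
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   onnx_config = MobileNetV1OnnxConfig(config)
#   print(onnx_config.inputs)                 # OrderedDict([('pixel_values', {0: 'batch'})])
#   print(onnx_config.atol_for_validation)    # 1e-4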
| 371
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roberta_fast'] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roberta'] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roberta'] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roberta'] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
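# With the _LazyModule above, submodules are only imported on first attribute
# access; e.g. (illustrative sketch, not part of the original file):
#
#   from transformers.models import roberta as roberta_mod
#   cfg = roberta_mod.RobertaConfig()   # triggers the real import lazily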
| 302
| 0
|
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = '''__DUMMY_TRANSFORMERS_USER__'''
CI_HUB_USER_FULL_NAME = '''Dummy User'''
CI_HUB_USER_TOKEN = '''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'''
CI_HUB_ENDPOINT = '''https://hub-ci.huggingface.co'''
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
CI_HUB_TOKEN_PATH = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    """simple docstring"""
    monkeypatch.setattr(
        '''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''' , CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE )
@pytest.fixture
def ci_hub_config(monkeypatch):
    """simple docstring"""
    monkeypatch.setattr('''datasets.config.HF_ENDPOINT''' , CI_HUB_ENDPOINT )
    monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''' , CI_HUB_DATASETS_URL )
@pytest.fixture
def ci_hub_token_path(monkeypatch):
    """simple docstring"""
    monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''' , CI_HUB_TOKEN_PATH )
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config , ci_hub_token_path):
    """simple docstring"""
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield
    HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def hf_api():
    """simple docstring"""
    return HfApi(endpoint=CI_HUB_ENDPOINT )
@pytest.fixture(scope='''session''' )
def hf_token(hf_api):
    """simple docstring"""
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token )
@pytest.fixture
def cleanup_repo(hf_api):
    """simple docstring"""
    def _cleanup_repo(repo_id ):
        hf_api.delete_repo(repo_id , token=CI_HUB_USER_TOKEN , repo_type='''dataset''' )
    return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
    """simple docstring"""
    @contextmanager
    def _temporary_repo(repo_id ):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id )
    return _temporary_repo
@pytest.fixture(scope='''session''' )
def hf_private_dataset_repo_txt_data_(hf_api , hf_token , text_file):
    """simple docstring"""
    repo_name = f"""repo_txt_data-{int(time.time() * 10E3 )}"""
    repo_id = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type='''dataset''' , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(text_file ) , path_in_repo='''data/text_data.txt''' , repo_id=repo_id , repo_type='''dataset''' , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type='''dataset''' )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url):
    """simple docstring"""
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def hf_private_dataset_repo_zipped_txt_data_(hf_api , hf_token , zip_csv_with_dir_path):
    """simple docstring"""
    repo_name = f"""repo_zipped_txt_data-{int(time.time() * 10E3 )}"""
    repo_id = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type='''dataset''' , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_csv_with_dir_path ) , path_in_repo='''data.zip''' , repo_id=repo_id , repo_type='''dataset''' , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type='''dataset''' )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url):
    """simple docstring"""
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def hf_private_dataset_repo_zipped_img_data_(hf_api , hf_token , zip_image_path):
    """simple docstring"""
    repo_name = f"""repo_zipped_img_data-{int(time.time() * 10E3 )}"""
    repo_id = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type='''dataset''' , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_image_path ) , path_in_repo='''data.zip''' , repo_id=repo_id , repo_type='''dataset''' , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type='''dataset''' )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_ , ci_hub_config , ci_hfh_hf_hub_url):
    """simple docstring"""
    return hf_private_dataset_repo_zipped_img_data_
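# Example test using the fixtures above (a sketch; assumes it lives where pytest
# can resolve these fixture names):
#
#   def test_private_repo_visible_with_token(hf_api, hf_private_dataset_repo_txt_data, hf_token):
#       info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
#       assert info.id == hf_private_dataset_repo_txt_data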
| 230
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="[UNK]" ,sep_token="[SEP]" ,pad_token="[PAD]" ,cls_token="[CLS]" ,mask_token="[MASK]" ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ,):
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' ,do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' ,strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' ,tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def batch_encode_candidates( self ,text ,**kwargs ):
        # Always use a fixed sequence length so the candidates can be stacked into a batch.
        kwargs['''padding'''] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop('''text_pair''' ,None )
        return_tensors = kwargs.pop('''return_tensors''' ,None )
        output_data = {
            '''input_ids''': [],
            '''attention_mask''': [],
            '''token_type_ids''': [],
        }
        for idx, candidate_text in enumerate(batch_text ):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text ,candidate_text_pair ,return_tensors=None ,**kwargs )
            encoded_input_ids = encoded_candidates.get('''input_ids''' )
            encoded_attention_mask = encoded_candidates.get('''attention_mask''' )
            encoded_token_type_ids = encoded_candidates.get('''token_type_ids''' )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids )
        output_data = {key: item for key, item in output_data.items() if len(item ) != 0}
        return BatchEncoding(output_data ,tensor_type=return_tensors )
    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
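# Usage sketch for batch_encode_candidates (illustrative): each example carries
# several candidate texts, and every candidate is padded to max_length so the
# batch can be stacked into a [batch, num_candidates, seq_len] tensor.
#
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = tokenizer.batch_encode_candidates(
#       [["hello world", "nice to meet you"]], max_length=10, return_tensors="np")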
| 230
| 1
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = """"""
IMG_DIR = """"""
OUTPUT_DIR = """"""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR , IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) , 4 )
        new_image, new_annos, path = update_image_and_anno(
            img_paths , annos , idxs , OUTPUT_SIZE , SCALE_RANGE , filter_scale=FILTER_TINY_SCALE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = path.split(os.sep )[-1].rsplit("." , 1 )[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg" , new_image , [cv2.IMWRITE_JPEG_QUALITY, 85] )
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj )
        with open(f"{file_root}.txt" , "w" ) as outfile:
            outfile.write("\n".join(line for line in annos_list ) )
def get_dataset(label_dir , img_dir ):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , "*.txt" ) ):
        label_name = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f"{label_name}.jpg" )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n" ).split(" " )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno(all_img_list , all_annos , idxs , output_size , scale_range , filter_scale = 0.0 , ):
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cv2.imread(path )
        if i == 0:  # top-left
            img = cv2.resize(img , (divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            img = cv2.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            img = cv2.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            img = cv2.resize(
                img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char ):
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 355
|
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """nielsr/canine-s""": 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xe000
SEP = 0xe001
BOS = 0xe002
MASK = 0xe003
RESERVED = 0xe004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , bos_token=chr(CLS) , eos_token=chr(SEP) , sep_token=chr(SEP) , cls_token=chr(CLS) , pad_token=chr(PAD) , mask_token=chr(MASK) , add_prefix_space=False , model_max_length=2048 , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False) if isinstance(bos_token , str) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False) if isinstance(eos_token , str) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False) if isinstance(sep_token , str) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False) if isinstance(cls_token , str) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False) if isinstance(pad_token , str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size( self ):
        return self._unicode_vocab_size
    def _tokenize( self , text: str ):
        return list(text)
    def _convert_token_to_id( self , token: str ):
        try:
            return ord(token)
        except TypeError:
            raise ValueError(F"invalid token: '{token}'")
    def _convert_id_to_token( self , index: int ):
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(F"invalid id: {index}")
    def convert_tokens_to_string( self , tokens ):
        return "".join(tokens)
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None):
        return ()
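# Usage sketch (illustrative): CANINE tokenizes directly to Unicode code points,
# so no vocab file is needed.
#
#   tok = CanineTokenizer()
#   ids = tok("hi")["input_ids"]
#   # ids == [0xE000, ord("h"), ord("i"), 0xE001]  ->  [57344, 104, 105, 57345]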
| 127
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name ):
    # rename original GroupViT checkpoint keys to the Hugging Face naming scheme
    if "img_encoder.pos_embed" in name:
        name = name.replace('''img_encoder.pos_embed''' , '''vision_model.embeddings.position_embeddings''' )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('''img_encoder.patch_embed.proj''' , '''vision_model.embeddings.patch_embeddings.projection''' )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('''img_encoder.patch_embed.norm''' , '''vision_model.embeddings.layernorm''' )
    if "img_encoder.layers" in name:
        name = name.replace('''img_encoder.layers''' , '''vision_model.encoder.stages''' )
    if "blocks" in name and "res" not in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('''attn''' , '''self_attn''' )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('''proj''' , '''out_proj''' )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('''pre_assign_attn.attn.proj''' , '''pre_assign_attn.attn.out_proj''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layer_norm1''' )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('''norm2''' , '''layer_norm2''' )
    if "img_encoder.norm" in name:
        name = name.replace('''img_encoder.norm''' , '''vision_model.layernorm''' )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('''text_encoder.token_embedding''' , '''text_model.embeddings.token_embedding''' )
    if "text_encoder.positional_embedding" in name:
        name = name.replace('''text_encoder.positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('''text_encoder.transformer.resblocks.''' , '''text_model.encoder.layers.''' )
    if "ln_1" in name:
        name = name.replace('''ln_1''' , '''layer_norm1''' )
    if "ln_2" in name:
        name = name.replace('''ln_2''' , '''layer_norm2''' )
    if "c_fc" in name:
        name = name.replace('''c_fc''' , '''fc1''' )
    if "c_proj" in name:
        name = name.replace('''c_proj''' , '''fc2''' )
    if "text_encoder" in name:
        name = name.replace('''text_encoder''' , '''text_model''' )
    if "ln_final" in name:
        name = name.replace('''ln_final''' , '''final_layer_norm''' )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('''img_projector.linear_hidden.''' , '''visual_projection.''' )
    if "img_projector.linear_out." in name:
        name = name.replace('''img_projector.linear_out.''' , '''visual_projection.3.''' )
    if "text_projector.linear_hidden" in name:
        name = name.replace('''text_projector.linear_hidden''' , '''text_projection''' )
    if "text_projector.linear_out" in name:
        name = name.replace('''text_projector.linear_out''' , '''text_projection.3''' )
    return name
def convert_state_dict(orig_state_dict , config ):
    # NOTE: the renamed target keys below are reconstructed from the upstream
    # GroupViT conversion script; the original mapping was lost in this copy.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('''.''' )
            stage_num, layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('''.''' )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )["""model"""]
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
    image = prepare_img()
    inputs = processor(text=['''a photo of a cat''', '''a photo of a dog'''] , images=image , padding=True , return_tensors='''pt''' )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3_523, 6.3629]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1_873, 8.6230]] )
    else:
        raise ValueError(f"Model name {model_name} not supported." )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print('''Successfully saved processor and model to''' , pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing to the hub...''' )
        processor.push_to_hub(model_name , organization='''nielsr''' )
        model.push_to_hub(model_name , organization='''nielsr''' )
if __name__ == "__main__":
lowercase__ : Any = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
lowercase__ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 264
|
'''simple docstring'''
from torch import nn
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self ,a_ ,a_ ) -> List[Any]:
super().__init__()
_UpperCAmelCase : Dict = class_size
_UpperCAmelCase : Union[str, Any] = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
_UpperCAmelCase : List[Any] = nn.Linear(a_ ,a_ )
def _snake_case ( self ,a_ ) -> Tuple:
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
_UpperCAmelCase : Optional[int] = self.mlp(a_ )
return logits
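# Editor's sketch: de-obfuscated usage of the head above, which is a single
# nn.Linear mapping an embedding to class logits. The sizes below are
# hypothetical examples, not values from the original script.
if __name__ == "__main__":
    import torch

    head = nn.Linear(768, 2)  # embed_size=768 -> class_size=2
    logits = head(torch.randn(1, 768))
    print(logits.shape)  # torch.Size([1, 2])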
| 215
| 0
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
_lowercase: str = pd.read_csv("sample_data.csv", header=None)
    _lowercase: List[Any] = df.shape[0]
    # If you're using some other dataset, change the target column here
    _lowercase: Optional[Any] = df.iloc[:, 1:2]
_lowercase: List[Any] = actual_data.values.reshape(len_data, 1)
_lowercase: Optional[Any] = MinMaxScaler().fit_transform(actual_data)
_lowercase: Optional[Any] = 10
_lowercase: Optional[Any] = 5
_lowercase: Optional[Any] = 20
_lowercase: Optional[Any] = len_data - periods * look_back
_lowercase: Optional[Any] = actual_data[:division]
_lowercase: Tuple = actual_data[division - look_back :]
_lowercase , _lowercase: Union[str, Any] = [], []
_lowercase , _lowercase: List[Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
_lowercase: Union[str, Any] = np.array(train_x)
_lowercase: Any = np.array(test_x)
_lowercase: Dict = np.array([list(i.ravel()) for i in train_y])
_lowercase: Dict = np.array([list(i.ravel()) for i in test_y])
_lowercase: Optional[Any] = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64))  # input shape is inferred from the previous layer
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
_lowercase: Union[str, Any] = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
_lowercase: List[str] = model.predict(x_test)
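# Editor's note on shapes (a sketch, assuming the intended variable names):
# with look_back=10 and forward_days=5, each training sample x is a (10, 1)
# window of scaled prices and each target y is the flattened next 5 values,
# so the network maps a (look_back, 1) sequence to forward_days outputs.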
| 364
|
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_lowercase: List[Any] = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def a( A : List[str] ) -> Union[str, Any]:
"""simple docstring"""
a = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(A , A )
_lowercase: Optional[int] = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def a( A : str ) -> Union[str, Any]:
"""simple docstring"""
a = list(s_dict.keys() )
for key in keys:
a = key
for k, v in WHISPER_MAPPING.items():
if k in key:
a = new_key.replace(A , A )
print(f'''{key} -> {new_key}''' )
a = s_dict.pop(A )
return s_dict
def a( A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
a , a = emb.weight.shape
a = nn.Linear(A , A , bias=A )
a = emb.weight.data
return lin_layer
def a( A : str , A : str ) -> bytes:
"""simple docstring"""
os.makedirs(A , exist_ok=A )
a = os.path.basename(A )
a = url.split("/" )[-2]
a = os.path.join(A , A )
if os.path.exists(A ) and not os.path.isfile(A ):
raise RuntimeError(f'''{download_target} exists and is not a regular file''' )
if os.path.isfile(A ):
a = open(A , "rb" ).read()
        if hashlib.sha256(A ).hexdigest() == expected_sha256:
return model_bytes
else:
warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
with urllib.request.urlopen(A ) as source, open(A , "wb" ) as output:
with tqdm(
total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=A , unit_divisor=1024 ) as loop:
while True:
a = source.read(8192 )
if not buffer:
break
output.write(A )
loop.update(len(A ) )
a = open(A , "rb" ).read()
    if hashlib.sha256(A ).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model." )
return model_bytes
def a( A : List[str] , A : Union[str, Any] ) -> str:
"""simple docstring"""
if ".pt" not in checkpoint_path:
a = _download(_MODELS[checkpoint_path] )
else:
a = torch.load(A , map_location="cpu" )
a = original_checkpoint["dims"]
a = original_checkpoint["model_state_dict"]
a = state_dict["decoder.token_embedding.weight"]
remove_ignore_keys_(A )
rename_keys(A )
a = True
a = state_dict["decoder.layers.0.fc1.weight"].shape[0]
a = WhisperConfig(
        vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=A , decoder_ffn_dim=A , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_head"] , max_source_positions=dimensions["n_audio_ctx"] , )
a = WhisperForConditionalGeneration(A )
a , a = model.model.load_state_dict(A , strict=A )
if len(A ) > 0 and not set(A ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
f''' but all the following weights are missing {missing}''' )
if tie_embeds:
a = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
a = proj_out_weights
model.save_pretrained(A )
if __name__ == "__main__":
_lowercase: Dict = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
_lowercase: List[str] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
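# Editor's sketch of the checksum convention used by _download above: the
# expected SHA-256 digest is the second-to-last path component of the
# checkpoint URL, and the downloaded bytes must hash to it. The helper name
# below is hypothetical, not part of the original script.
def _demo_verify_checkpoint(model_bytes: bytes, url: str) -> bool:
    expected_sha256 = url.split("/")[-2]
    return hashlib.sha256(model_bytes).hexdigest() == expected_sha256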
| 71
| 0
|
import numpy as np
a_ = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = np.array(a )
def __UpperCamelCase ( self : int , a : str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = np.where(letter == self.SQUARE )
SCREAMING_SNAKE_CASE : Optional[Any] = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def __UpperCamelCase ( self : Optional[Any] , a : int , a : int ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.SQUARE[indexa - 1, indexa - 1]
return letter
def __UpperCamelCase ( self : Any , a : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = message.lower()
SCREAMING_SNAKE_CASE : Union[str, Any] = message.replace(" " , "" )
SCREAMING_SNAKE_CASE : List[str] = message.replace("j" , "i" )
SCREAMING_SNAKE_CASE : int = np.empty((2, len(a )) )
for letter_index in range(len(a ) ):
SCREAMING_SNAKE_CASE : int = self.letter_to_numbers(message[letter_index] )
SCREAMING_SNAKE_CASE : Tuple = numbers[0]
SCREAMING_SNAKE_CASE : Optional[int] = numbers[1]
SCREAMING_SNAKE_CASE : Union[str, Any] = first_step.reshape(2 * len(a ) )
SCREAMING_SNAKE_CASE : List[str] = ""
for numbers_index in range(len(a ) ):
SCREAMING_SNAKE_CASE : List[str] = int(second_step[numbers_index * 2] )
SCREAMING_SNAKE_CASE : Optional[int] = int(second_step[(numbers_index * 2) + 1] )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.numbers_to_letter(a , a )
SCREAMING_SNAKE_CASE : List[str] = encoded_message + letter
return encoded_message
def __UpperCamelCase ( self : List[Any] , a : str ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = message.lower()
        SCREAMING_SNAKE_CASE : List[str] = message.replace(" " , "" )
SCREAMING_SNAKE_CASE : List[Any] = np.empty(2 * len(a ) )
for letter_index in range(len(a ) ):
SCREAMING_SNAKE_CASE : Tuple = self.letter_to_numbers(message[letter_index] )
SCREAMING_SNAKE_CASE : Dict = numbers[0]
SCREAMING_SNAKE_CASE : str = numbers[1]
SCREAMING_SNAKE_CASE : Optional[Any] = first_step.reshape((2, len(a )) )
SCREAMING_SNAKE_CASE : List[str] = ""
for numbers_index in range(len(a ) ):
SCREAMING_SNAKE_CASE : List[str] = int(second_step[0, numbers_index] )
SCREAMING_SNAKE_CASE : str = int(second_step[1, numbers_index] )
SCREAMING_SNAKE_CASE : List[Any] = self.numbers_to_letter(a , a )
SCREAMING_SNAKE_CASE : List[str] = decoded_message + letter
return decoded_message
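# Editor's sketch: a self-contained, de-obfuscated version of the Polybius
# lookup the class above implements. Each letter maps to a 1-based
# (row, column) pair in the 5x5 square, with "j" folded into "i".
_DEMO_SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


def _demo_polybius_encode(message: str) -> list:
    """_demo_polybius_encode("ab") -> [(1, 1), (1, 2)]"""
    coords = []
    for letter in message.lower().replace(" ", "").replace("j", "i"):
        for row_idx, row in enumerate(_DEMO_SQUARE, start=1):
            if letter in row:
                coords.append((row_idx, row.index(letter) + 1))
    return coords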
| 76
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a = 1_6
a = 3_2
def lowercase (snake_case__ : Accelerator , snake_case__ : int = 16 ) -> List[str]:
'''simple docstring'''
lowerCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(snake_case__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(snake_case__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase = 8
else:
lowerCAmelCase = None
return tokenizer.pad(
snake_case__ , padding="""longest""" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCAmelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
lowerCAmelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a = mocked_dataloaders # noqa: F811
def lowercase (snake_case__ : Any , snake_case__ : List[str] ) -> str:
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , snake_case__ ) == "1":
lowerCAmelCase = 2
# New Code #
lowerCAmelCase = int(args.gradient_accumulation_steps )
lowerCAmelCase = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=snake_case__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase = config["""lr"""]
lowerCAmelCase = int(config["""num_epochs"""] )
lowerCAmelCase = int(config["""seed"""] )
lowerCAmelCase = int(config["""batch_size"""] )
lowerCAmelCase = evaluate.load("""glue""" , """mrpc""" )
set_seed(snake_case__ )
lowerCAmelCase , lowerCAmelCase = get_dataloaders(snake_case__ , snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase = AdamW(params=model.parameters() , lr=snake_case__ )
# Instantiate scheduler
lowerCAmelCase = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=100 , num_training_steps=(len(snake_case__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
with LocalSGD(
accelerator=snake_case__ , model=snake_case__ , local_sgd_steps=snake_case__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(snake_case__ ):
lowerCAmelCase = model(**snake_case__ )
lowerCAmelCase = output.loss
accelerator.backward(snake_case__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase = model(**snake_case__ )
lowerCAmelCase = outputs.logits.argmax(dim=-1 )
lowerCAmelCase , lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
lowerCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , snake_case__ )
def lowercase () -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=snake_case__ , default=snake_case__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=snake_case__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=snake_case__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
| 155
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
"""google/pegasus-large""": """https://huggingface.co/google/pegasus-large/resolve/main/config.json""",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''pegasus'''
UpperCAmelCase__ = ['''past_key_values''']
UpperCAmelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Tuple , UpperCAmelCase__ : List[Any]=50_265 , UpperCAmelCase__ : Optional[int]=1_024 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : List[str]=4_096 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Any=4_096 , UpperCAmelCase__ : Dict=16 , UpperCAmelCase__ : Optional[int]=0.0 , UpperCAmelCase__ : Optional[int]=0.0 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Optional[Any]=1_024 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : int=0 , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : str=0 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : Optional[int]=1 , **UpperCAmelCase__ : Dict , ) ->Optional[int]:
'''simple docstring'''
A__ = vocab_size
A__ = max_position_embeddings
A__ = d_model
A__ = encoder_ffn_dim
A__ = encoder_layers
A__ = encoder_attention_heads
A__ = decoder_ffn_dim
A__ = decoder_layers
A__ = decoder_attention_heads
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = activation_function
A__ = init_std
A__ = encoder_layerdrop
A__ = decoder_layerdrop
A__ = use_cache
A__ = encoder_layers
A__ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , forced_eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE ( self : str) ->int:
'''simple docstring'''
return self.d_model
| 359
|
_lowerCamelCase : dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609_344,
"knot": 1.852,
}
_lowerCamelCase : dict[str, float] = {
"km/h": 1.0,
"m/s": 0.277_777_778,
"mph": 0.621_371_192,
"knot": 0.539_956_803,
}
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> float:
"""simple docstring"""
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
A__ = (
f"""Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"""
f"""Valid values are: {", ".join(lowercase_ )}"""
)
raise ValueError(lowercase_ )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
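# Editor's worked example of the formula above: converting 100 mph to km/h is
#   100 * speed_chart["mph"] * speed_chart_inverse["km/h"]
#   = 100 * 1.609344 * 1.0 = 160.934 (after rounding to 3 decimals).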
| 231
| 0
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'spiece.model'}
__UpperCAmelCase = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
__UpperCAmelCase = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
__UpperCAmelCase = '▁'
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Union[str, Any] = VOCAB_FILES_NAMES
_snake_case : List[str] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _UpperCamelCase , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase="[CLS]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="<unk>" , _UpperCamelCase="[SEP]" , _UpperCamelCase="<pad>" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase = None , **_UpperCamelCase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
UpperCAmelCase_ : Dict = (
AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase , normalized=_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase )
else mask_token
)
UpperCAmelCase_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_UpperCamelCase , remove_space=_UpperCamelCase , keep_accents=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
UpperCAmelCase_ : List[str] = do_lower_case
UpperCAmelCase_ : Optional[Any] = remove_space
UpperCAmelCase_ : Union[str, Any] = keep_accents
UpperCAmelCase_ : Any = vocab_file
UpperCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@property
def __UpperCAmelCase ( self ) -> Union[str, Any]:
return len(self.sp_model )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = self.__dict__.copy()
UpperCAmelCase_ : Any = None
return state
def __setstate__( self , _UpperCamelCase ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCAmelCase_ : Dict = {}
UpperCAmelCase_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[str]:
if self.remove_space:
UpperCAmelCase_ : Tuple = ' '.join(inputs.strip().split() )
else:
UpperCAmelCase_ : List[Any] = inputs
UpperCAmelCase_ : int = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
UpperCAmelCase_ : int = unicodedata.normalize('NFKD' , _UpperCamelCase )
UpperCAmelCase_ : str = ''.join([c for c in outputs if not unicodedata.combining(_UpperCamelCase )] )
if self.do_lower_case:
UpperCAmelCase_ : Optional[int] = outputs.lower()
return outputs
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[str]:
UpperCAmelCase_ : Any = self.preprocess_text(_UpperCamelCase )
UpperCAmelCase_ : int = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
UpperCAmelCase_ : Any = []
for piece in pieces:
if len(_UpperCamelCase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
UpperCAmelCase_ : List[str] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCamelCase , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
UpperCAmelCase_ : Union[str, Any] = cur_pieces[1:]
else:
UpperCAmelCase_ : str = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_UpperCamelCase )
else:
new_pieces.append(_UpperCamelCase )
return new_pieces
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any:
return self.sp_model.PieceToId(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Tuple:
return self.sp_model.IdToPiece(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[Any]:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : List[Any] = ''
UpperCAmelCase_ : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : Optional[int] = []
else:
current_sub_tokens.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : Tuple = [self.sep_token_id]
UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : int = [self.sep_token_id]
UpperCAmelCase_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(_UpperCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase_ : int = os.path.join(
_UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , 'wb' ) as fi:
UpperCAmelCase_ : Tuple = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
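# Editor's note on the special-token layout built by the tokenizer methods
# above (ALBERT-style, tokens shown symbolically):
#   single sequence: [CLS] A [SEP]           -> cls + token_ids + sep
#   sequence pair:   [CLS] A [SEP] B [SEP]   -> cls + ids_a + sep + ids_b + sep
# with token type ids of 0 for the first segment and 1 for the second.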
| 29
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> None:
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
| 29
| 1
|
"""simple docstring"""
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary search for the insertion point of val within collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements right and drop val into the slot found above
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
A = input('''Enter numbers separated by a comma:\n''').strip()
A = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
| 188
|
"""simple docstring"""
import os
import string
import sys
A = 1 << 8
A = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
KEYMAP['''arrow_begin'''] = KEYMAP['''up''']
KEYMAP['''arrow_end'''] = KEYMAP['''left''']
if sys.platform == "win32":
A = []
A = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def __A ( ) -> Dict:
if os.name == "nt":
import msvcrt
__a : Optional[Any] = '''mbcs'''
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(a_) == 0:
# Read the keystroke
__a : Optional[Any] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
__a : Optional[Any] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
__a : Union[str, Any] = chr(WIN_KEYMAP[cha])
WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int''']))
WIN_CH_BUFFER.append(a_)
if ord(a_) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_26))
__a : str = chr(KEYMAP['''esc'''])
except KeyError:
__a : str = cha[1]
else:
__a : Optional[Any] = ch.decode(a_)
else:
__a : Union[str, Any] = WIN_CH_BUFFER.pop(0)
elif os.name == "posix":
import termios
import tty
__a : Any = sys.stdin.fileno()
__a : List[str] = termios.tcgetattr(a_)
try:
tty.setraw(a_)
__a : int = sys.stdin.read(1)
finally:
termios.tcsetattr(a_ , termios.TCSADRAIN , a_)
return ch
def __A ( ) -> str:
__a : Any = get_raw_chars()
if ord(a_) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(a_) == KEYMAP["esc"]:
__a : str = get_raw_chars()
if ord(a_) == KEYMAP["mod_int"]:
__a : List[str] = get_raw_chars()
if ord(a_) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(a_) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(a_) + ARROW_KEY_FLAG)
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 188
| 1
|
import unittest
from knapsack import greedy_knapsack as kp
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Tuple ) -> str:
"""simple docstring"""
lowercase__ = [10, 20, 30, 40, 50, 60]
lowercase__ = [2, 4, 6, 8, 10, 12]
lowercase__ = 100
self.assertEqual(kp.calc_profit(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , 210 )
def lowerCamelCase_ ( self: int ) -> Dict:
"""simple docstring"""
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
def lowerCamelCase_ ( self: int ) -> Tuple:
"""simple docstring"""
        self.assertRaisesRegex(ValueError , '''Weight can not be negative.''' )
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
        self.assertRaisesRegex(ValueError , '''Profit can not be negative.''' )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
        self.assertRaisesRegex(
            ValueError , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
| 110
|
def perfect_cube(n: int) -> bool:
    # round the float cube root so exact cubes are not lost to precision error
    val = round(n ** (1 / 3))
    return val * val * val == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
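# Editor's note: rounding is what makes the check reliable. With plain floats,
# 27 ** (1 / 3) evaluates to 3.0000000000000004, so cubing it and comparing
# against 27 would return False for an exact cube without the round() above.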
| 330
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : List[str] = logging.get_logger(__name__)
A_ : Optional[int] = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: str = '''gpt_neox'''
def __init__( self , A__=5_0432 , A__=6144 , A__=44 , A__=64 , A__=2_4576 , A__="gelu" , A__=0.2_5 , A__=1_0000 , A__=0.0 , A__=0.0 , A__=0.1 , A__=2048 , A__=0.0_2 , A__=1e-5 , A__=True , A__=0 , A__=2 , A__=False , A__=True , A__=None , **A__ , ):
super().__init__(bos_token_id=A__ , eos_token_id=A__ , **A__ )
A__ : int = vocab_size
A__ : List[Any] = max_position_embeddings
A__ : Optional[int] = hidden_size
A__ : int = num_hidden_layers
A__ : List[Any] = num_attention_heads
A__ : Dict = intermediate_size
A__ : List[str] = hidden_act
A__ : str = rotary_pct
A__ : str = rotary_emb_base
A__ : Optional[int] = attention_dropout
A__ : Optional[Any] = hidden_dropout
A__ : Union[str, Any] = classifier_dropout
A__ : Tuple = initializer_range
A__ : Optional[Any] = layer_norm_eps
A__ : Union[str, Any] = use_cache
A__ : str = tie_word_embeddings
A__ : Dict = use_parallel_residual
A__ : Any = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"""The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )
def __A ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , A__ ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                F"""got {self.rope_scaling}""" )
A__ : List[Any] = self.rope_scaling.get("""type""" , A__ )
A__ : Dict = self.rope_scaling.get("""factor""" , A__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(A__ , A__ ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
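# Editor's sketch: a rope_scaling value that passes the validation above (the
# checked fields are "type" and "factor"); the class name GPTNeoXConfig is an
# assumption about how this config is exposed, not part of the snippet:
#   config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})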
| 356
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
A_ : Optional[int] = '<<<<<<< This should probably be modified because it mentions: '
A_ : int = '=======\n>>>>>>>\n'
A_ : Optional[Any] = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
A_ : List[Any] = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def UpperCamelCase (lowercase_: Namespace ) -> str:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class _a (__magic_name__ ):
'''simple docstring'''
@staticmethod
def __A ( A__ ):
A__ : Any = parser.add_parser(
"""convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
train_parser.add_argument(
"""--tfds_path""" , type=A__ , required=A__ , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
train_parser.add_argument(
"""--datasets_directory""" , type=A__ , required=A__ , help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=A__ )
def __init__( self , A__ , A__ , *A__ ):
A__ : Optional[Any] = get_logger("""datasets-cli/converting""" )
A__ : Optional[Any] = tfds_path
A__ : Dict = datasets_directory
def __A ( self ):
if os.path.isdir(self._tfds_path ):
A__ : Any = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
A__ : int = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
A__ : Optional[int] = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
A__ : Optional[Any] = []
A__ : List[Any] = []
A__ : List[Any] = {}
if os.path.isdir(self._tfds_path ):
A__ : List[str] = os.listdir(A__ )
else:
A__ : Optional[Any] = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
A__ : Any = os.path.join(A__ , A__ )
A__ : Optional[Any] = os.path.join(A__ , A__ )
if not os.path.isfile(A__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(A__ , encoding="""utf-8""" ) as f:
A__ : Dict = f.readlines()
A__ : Tuple = []
A__ : str = False
A__ : Tuple = False
A__ : Optional[Any] = []
for line in lines:
A__ : Optional[int] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
A__ : Dict = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
A__ : Any = """"""
continue
elif "from absl import logging" in out_line:
A__ : int = """from datasets import logging\n"""
elif "getLogger" in out_line:
A__ : List[str] = out_line.replace("""getLogger""" , """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
A__ : Optional[Any] = True
                    A__ : int = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(A__ ) + """\n""" )
out_lines.append(A__ )
out_lines.append(A__ )
continue
else:
for pattern, replacement in TO_CONVERT:
A__ : List[str] = re.sub(A__ , A__ , A__ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
A__ : Dict = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , A__ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
A__ : Tuple = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
A__ : List[str] = True
out_lines.append(A__ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
A__ : Tuple = f_name.replace(""".py""" , """""" )
A__ : Optional[int] = os.path.join(A__ , A__ )
A__ : Optional[Any] = os.path.join(A__ , A__ )
os.makedirs(A__ , exist_ok=A__ )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(A__ )
if needs_manual_update:
with_manual_update.append(A__ )
with open(A__ , """w""" , encoding="""utf-8""" ) as f:
f.writelines(A__ )
self._logger.info(F"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
A__ : int = os.path.basename(A__ )
A__ : List[str] = imports_to_builder_map[f_name.replace(""".py""" , """""" )]
self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(A__ , A__ )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 141
| 0
|
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = credit_card_number
snake_case_ = 0
snake_case_ = len(UpperCamelCase__ ) - 2
for i in range(UpperCamelCase__ , -1 , -2 ):
# double the value of every second digit
snake_case_ = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
snake_case_ = cc_number[:i] + str(UpperCamelCase__ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(UpperCamelCase__ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = F'''{credit_card_number} is an invalid credit card number because'''
if not credit_card_number.isdigit():
print(F'''{error_message} it has nonnumerical characters.''' )
return False
if not 13 <= len(UpperCamelCase__ ) <= 16:
print(F'''{error_message} of its length.''' )
return False
if not validate_initial_digits(UpperCamelCase__ ):
print(F'''{error_message} of its first two digits.''' )
return False
if not luhn_validation(UpperCamelCase__ ):
print(F'''{error_message} it fails the Luhn check.''' )
return False
print(F'''{credit_card_number} is a valid credit card number.''' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 285
|
import os
import numpy
import onnx
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = a.name
snake_case_ = b.name
snake_case_ = ''
snake_case_ = ''
snake_case_ = a == b
snake_case_ = name_a
snake_case_ = name_b
return res
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCamelCase__ , UpperCamelCase__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCamelCase__ , UpperCamelCase__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
for n in graph_proto.node:
_node_replace_input_with(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = list(model.graph.initializer )
snake_case_ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
snake_case_ = inits[i].name
snake_case_ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCamelCase__ , UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = os.path.dirname(UpperCamelCase__ )
snake_case_ = os.path.basename(UpperCamelCase__ )
snake_case_ = onnx.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
snake_case_ = list(model.graph.initializer )
snake_case_ = set()
snake_case_ = {}
snake_case_ = []
snake_case_ = 0
for i in range(len(UpperCamelCase__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCamelCase__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCamelCase__ )
dup_set.add(UpperCamelCase__ )
snake_case_ = inits[j].data_type
snake_case_ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , UpperCamelCase__ )
total_reduced_size += mem_size
snake_case_ = inits[i].name
snake_case_ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCamelCase__ )
else:
snake_case_ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
snake_case_ = sorted(UpperCamelCase__ )
_remove_dup_initializers_from_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
snake_case_ = 'optimized_' + model_file_name
snake_case_ = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
onnx.save(UpperCamelCase__ , UpperCamelCase__ )
return new_model
| 285
| 1
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : Optional[int] , a : Optional[Any] , a : Optional[int]=13 , a : Optional[int]=7 , a : Any=True , a : Tuple=True , a : int=True , a : List[str]=True , a : Dict=99 , a : Dict=16 , a : Dict=36 , a : str=6 , a : Union[str, Any]=6 , a : int=6 , a : Any=37 , a : Dict="gelu" , a : Union[str, Any]=0.1 , a : List[str]=0.1 , a : List[str]=512 , a : Dict=16 , a : Tuple=2 , a : List[Any]=0.0_2 , a : Optional[int]=3 , a : int=4 , a : Optional[Any]=None , ):
'''simple docstring'''
lowerCAmelCase__ : Any = parent
lowerCAmelCase__ : Optional[Any] = batch_size
lowerCAmelCase__ : Union[str, Any] = seq_length
lowerCAmelCase__ : Any = is_training
lowerCAmelCase__ : List[str] = use_input_mask
lowerCAmelCase__ : List[str] = use_token_type_ids
lowerCAmelCase__ : Union[str, Any] = use_labels
lowerCAmelCase__ : Any = vocab_size
lowerCAmelCase__ : Any = embedding_size
lowerCAmelCase__ : Tuple = hidden_size
lowerCAmelCase__ : Dict = num_hidden_layers
lowerCAmelCase__ : Dict = num_hidden_groups
lowerCAmelCase__ : Tuple = num_attention_heads
lowerCAmelCase__ : str = intermediate_size
lowerCAmelCase__ : List[Any] = hidden_act
lowerCAmelCase__ : Tuple = hidden_dropout_prob
lowerCAmelCase__ : str = attention_probs_dropout_prob
lowerCAmelCase__ : Any = max_position_embeddings
lowerCAmelCase__ : List[str] = type_vocab_size
lowerCAmelCase__ : Tuple = type_sequence_label_size
lowerCAmelCase__ : int = initializer_range
lowerCAmelCase__ : Optional[int] = num_labels
lowerCAmelCase__ : Tuple = num_choices
lowerCAmelCase__ : Dict = scope
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Optional[int] = None
if self.use_token_type_ids:
lowerCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : str = None
if self.use_labels:
lowerCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def _lowerCamelCase ( self : Optional[Any] , a : Dict , a : int , a : Union[str, Any] , a : Any , a : Optional[int] , a : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : str = AlbertModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a , token_type_ids=a )
lowerCAmelCase__ : List[str] = model(a , token_type_ids=a )
lowerCAmelCase__ : Any = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self : List[Any] , a : Optional[Any] , a : Any , a : int , a : int , a : Union[str, Any] , a : int , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = AlbertForPreTraining(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = model(
a , attention_mask=a , token_type_ids=a , labels=a , sentence_order_label=a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def _lowerCamelCase ( self : List[str] , a : int , a : Dict , a : Union[str, Any] , a : Optional[Any] , a : Optional[Any] , a : int , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = AlbertForMaskedLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Tuple , a : Tuple , a : Any , a : Dict , a : Dict , a : List[str] , a : Union[str, Any] , a : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = AlbertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(
a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Optional[Any] , a : Optional[Any] , a : List[Any] , a : str , a : Dict , a : Any , a : List[Any] , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.num_labels
lowerCAmelCase__ : List[Any] = AlbertForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : str , a : str , a : Dict , a : Dict , a : Dict , a : Union[str, Any] , a : Dict , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.num_labels
lowerCAmelCase__ : Any = AlbertForTokenClassification(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[Any] = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 307
|
def base16_encode(data: bytes) -> str:
    # Turn each byte into two uppercase hex digits and join them together.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
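    # Small round-trip demonstration of the two helpers above (expected
    # outputs follow directly from the RFC3548 uppercase alphabet):
    encoded = base16_encode(b"Hello World!")
    print(encoded)  # 48656C6C6F20576F726C6421
    print(base16_decode(encoded))  # b'Hello World!'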
| 307
| 1
|
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
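# For example, 220 and 284 form the smallest amicable pair:
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so both
# numbers are counted by solution(10000).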
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 69
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
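# With the checker registered above, a doctest can opt out of output
# comparison via the custom flag, e.g.:
#     >>> some_call_with_noisy_output()  # doctest: +IGNORE_RESULT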
| 319
| 0
|
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self):
        # Sum the currently allocated resources, column by column.
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        # What is left of each resource once current allocations are subtracted.
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self):
        # Per-process remaining need: maximum claim minus current allocation.
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self):
        # Map each need row back to its original process index.
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs):
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
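    # Example run on the sample tables defined above. Any truthy keyword
    # (here `describe=True`) triggers the pretty-printed resource tables
    # before the safety check, per `main`'s kwargs handling:
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)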
| 367
|
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""AI-Sweden/gpt-sw3-126m""": 2_048,
"""AI-Sweden/gpt-sw3-350m""": 2_048,
"""AI-Sweden/gpt-sw3-1.6b""": 2_048,
"""AI-Sweden/gpt-sw3-6.7b""": 2_048,
"""AI-Sweden/gpt-sw3-20b""": 2_048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def preprocess_text(self, text: str) -> str:
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string, this function is overridden to remove the default clean up."""
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string, keeping special tokens intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decodes a text or batch of texts from token ids using the raw SP tokenizer."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
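# Minimal usage sketch for the tokenizer above. It assumes the SentencePiece
# model for one of the checkpoints listed in PRETRAINED_VOCAB_FILES_MAP is
# reachable, e.g. "AI-Sweden/gpt-sw3-126m":
#
#     tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#     ids = tokenizer.encode_fast("Svenska är roligt", return_tensors="pt")
#     text = tokenizer.decode_fast(ids.tolist())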
| 269
| 0
|
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of the given `indent_level`, between `start_prompt` and `end_prompt`."""
    # Let's skip the beginning of the file until start_prompt (if provided).
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the file).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a `key` function (mapping an object to a string) to lowercase and ignore underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a string."""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
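# Illustrative example of the ordering rules above:
#     sort_objects(["XXX", "NotAConstant", "a_function", "_private"])
# keeps constants first, then classes, then functions, sorting each bucket
# case- and underscore-insensitively:
#     ["XXX", "NotAConstant", "a_function", "_private"]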
def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` determines whether we only check or overwrite."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Sort the imports in every `__init__.py` of the transformers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
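# Typical invocations from the repository root (the script path is
# hypothetical; only PATH_TO_TRANSFORMERS above is fixed by this file):
#     python utils/custom_init_isort.py --check_only   # CI-style check, raises on drift
#     python utils/custom_init_isort.py                # rewrites the __init__.py files in place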
| 51
|
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 73
| 0
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the extra alpha channel is dropped, hence num_channels - 1)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
| 348
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
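# Example with hypothetical column names: a dataset whose columns are "q",
# "ctx" and "ans" can be aligned to the canonical schema via
#     QuestionAnsweringExtractive(question_column="q", context_column="ctx", answers_column="ans")
# whose `column_mapping` is then {"q": "question", "ctx": "context", "ans": "answers"}.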
| 348
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 339
|
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head: a single linear layer mapping embeddings to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state: (batch, embed_size) -> logits: (batch, class_size)
        logits = self.mlp(hidden_state)
        return logits
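# Minimal usage sketch (shapes are illustrative):
#     import torch
#     head = ClassificationHead(class_size=5, embed_size=768)
#     logits = head(torch.randn(2, 768))  # -> shape (2, 5)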
| 71
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
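# Instantiation sketch: `embedding_size` falls back to `hidden_size` when not
# given, so these two configs are equivalent:
#     RoFormerConfig(hidden_size=768)
#     RoFormerConfig(hidden_size=768, embedding_size=768)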
| 8
|
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
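# Note: the `converters` hook above mirrors pandas.read_csv, which the Csv
# builder delegates to: each lambda receives the raw cell string, e.g.
# "1 2 3" -> [1, 2, 3], before the Arrow table is built.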
| 8
| 1
|
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 65536,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 65536,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 131072,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
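# In this parameterization alpha = cos(theta) and sigma = sin(theta) with
# theta = t * pi / 2, so atan2(sigma, alpha) / (pi / 2) recovers t; e.g.
# alpha_sigma_to_t(torch.tensor(0.0), torch.tensor(1.0)) == 1.0 (pure noise).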
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnetaD(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]
    if string.startswith("main."):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"
    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left
    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
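# Illustrative check (hypothetical example, not part of the original script):
# the simplest case handled by `rename` is the timestep embedding, which is
# mapped verbatim without any depth bookkeeping:
#
#     >>> rename("timestep_embed.weight")
#     'time_proj.weight'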
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
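# Minimal sketch of the qkv split above (hypothetical shapes, illustrative
# only): a fused in-projection weight of shape (3 * dim, dim, 1) is sliced
# into three (dim, dim) linear weights, one each for query, key and value:
#
#     >>> import torch
#     >>> fused = torch.randn(24, 8, 1)
#     >>> out = transform_conv_attns({}, ["q.weight", "k.weight", "v.weight"], fused)
#     >>> [w.shape for w in out.values()]
#     [torch.Size([8, 8]), torch.Size([8, 8]), torch.Size([8, 8])]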
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33
    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
main(args)
| 113
|
"""simple docstring"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
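# Illustrative example (not in the original): the single pass above sorts any
# permutation of the three colors in place.
#
#     >>> dutch_national_flag_sort([2, 0, 1, 0, 2, 1])
#     [0, 0, 1, 1, 2, 2]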
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 113
| 1
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
__magic_name__ = "patrickvonplaten/t5-tiny-random"
__magic_name__ = "sshleifer/bart-tiny-random"
__magic_name__ = "sshleifer/tiny-mbart"
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()
        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])
        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 152
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
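# Illustrative usage (hypothetical, not part of this module): the ONNX config
# declares a single "pixel_values" input axis layout that an exporter can use:
#
#     >>> onnx_config = DeiTOnnxConfig(DeiTConfig())
#     >>> list(onnx_config.inputs)
#     ['pixel_values']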
| 152
| 1
|
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        # Kadane's algorithm: track the best sum ending at i and the best seen so far
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
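# Illustrative example (not in the original): the maximum contiguous sub-array
# sum of 1, -2, 3, 4 is 3 + 4 = 7.
#
#     >>> SubArray("1,-2,3,4").solve_sub_array()
#     7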
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
| 218
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12,
        num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5,
        attention_dropout=0.0, initializer_range=0.02, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101,
        eos_token_id=102, num_image_with_embedding=None, **kwargs,
    ) -> None:
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
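# Illustrative round-trip (hypothetical, not part of this module): `to_dict`
# serializes the nested vision config, and `from_dict` restores it.
#
#     >>> config = GitConfig()
#     >>> restored = GitConfig.from_dict(config.to_dict())
#     >>> restored.vision_config.hidden_size == config.vision_config.hidden_size
#     True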
| 218
| 1
|
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
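# Illustrative example (not in the original): slowsort sorts in place.
#
#     >>> seq = [5, 2, 4, 1]
#     >>> slowsort(seq)
#     >>> seq
#     [1, 2, 4, 5]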
if __name__ == "__main__":
from doctest import testmod
testmod()
| 363
|
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
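# Illustrative sanity check (not in the original): the trapezoid rule is exact
# for linear functions, so integrating f(x) = x over [0, 1] gives 0.5.
#
#     >>> round(trapezoidal_area(lambda x: x, 0, 1, 10), 10)
#     0.5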
if __name__ == "__main__":
    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
| 225
| 0
|
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3,
        is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size,
            num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, pixel_values, labels
    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values
        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)
        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
| 121
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    special_keys = ["key_proj", "value_proj", "query_proj"]
    mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # these were bare comparisons before; they only check anything as asserts
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)
        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")
    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 121
| 1
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False, only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1],
            fp16=True, multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True,
                sequence_lengths=[8], batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True,
                trace_memory_line_by_line=True, multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 35
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
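# Illustrative behaviour (hypothetical snippet, not part of this module):
# instantiating the deprecated class still works, but emits a FutureWarning
# pointing users at CLIPImageProcessor.
#
#     >>> import warnings
#     >>> with warnings.catch_warnings(record=True) as caught:
#     ...     warnings.simplefilter("always")
#     ...     _ = CLIPFeatureExtractor()
#     >>> issubclass(caught[0].category, FutureWarning)
#     True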
| 35
| 1
|
from pathlib import Path
import fire
from tqdm import tqdm
def A__ ( __lowerCamelCase="ro", __lowerCamelCase="en", __lowerCamelCase="wmt16", __lowerCamelCase=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
SCREAMING_SNAKE_CASE_ = F'''{src_lang}-{tgt_lang}'''
print(F'''Converting {dataset}-{pair}''' )
SCREAMING_SNAKE_CASE_ = datasets.load_dataset(UpperCAmelCase_, UpperCAmelCase_ )
if save_dir is None:
SCREAMING_SNAKE_CASE_ = F'''{dataset}-{pair}'''
SCREAMING_SNAKE_CASE_ = Path(UpperCAmelCase_ )
save_dir.mkdir(exist_ok=UpperCAmelCase_ )
for split in ds.keys():
print(F'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
SCREAMING_SNAKE_CASE_ = 'val' if split == 'validation' else split
SCREAMING_SNAKE_CASE_ = save_dir.joinpath(F'''{fn}.source''' )
SCREAMING_SNAKE_CASE_ = save_dir.joinpath(F'''{fn}.target''' )
SCREAMING_SNAKE_CASE_ = src_path.open('''w+''' )
SCREAMING_SNAKE_CASE_ = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
SCREAMING_SNAKE_CASE_ = x['translation']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(F'''Saved {dataset} dataset to {save_dir}''' )
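# Example invocation via fire (illustrative; the script filename is assumed):
#
#     python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16
#
# which is equivalent to calling download_wmt_dataset("ro", "en", "wmt16").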
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 299
|
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
END_COMMON = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
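# Illustrative example (not in the original): applying the DECODER_PATTERNS
# chain maps TF-style names onto the HF layout, e.g.
#
#     >>> rename_state_dict_key("pegasus/decoder/layer_0/kernel", DECODER_PATTERNS)
#     'model.decoder.layers.0.weight'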
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 185
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-12, image_size=384, patch_size=16, num_channels=3, is_hybrid=False, qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11], readout_type="project", reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768], fusion_hidden_size=256, head_in_index=-1,
        use_batch_norm_in_fusion_residual=False, use_auxiliary_head=True, auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255, semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24], neck_ignore_stages=[0, 1], backbone_config=None, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
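# Illustrative usage (hypothetical, not part of this module): the hybrid
# variant builds a default BiT backbone config when none is provided.
#
#     >>> config = DPTConfig(is_hybrid=True)
#     >>> config.backbone_config.layer_type
#     'bottleneck'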
| 331
|
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
UpperCAmelCase : Dict = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
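

# Hedged sketch (illustrative, standalone -- not part of the release script itself): how one
# REPLACE_PATTERNS entry rewrites a version line. The regex finds the current version and the
# template gets "VERSION" swapped for the release being cut:
#
#     import re
#
#     re_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
#     code = '__version__ = "0.19.0.dev0"\n'
#     replace = '__version__ = "VERSION"\n'.replace("VERSION", "0.19.0")
#     print(re_pattern.sub(replace, code))  # prints the rewritten version line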
| 331
| 1
|
def solution(n: int = 1_000) -> int:
    """Return the sum of all multiples of 3 or 5 strictly below ``n``."""
    a = 3
    result = 0
    while a < n:
        # Multiples of 15 divide by 3, so the single branch below already counts them exactly once.
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(F"{solution() = }")
| 340
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
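

# Hedged sketch (standalone, illustrative -- not the transformers implementation): the structure
# above defers the heavy framework imports until an attribute is first touched. Module-level
# __getattr__ (PEP 562) can emulate the same idea in a few lines:
#
#     import importlib
#
#     _import_structure = {"math": ["sqrt"]}
#
#     def __getattr__(name):
#         for module_name, symbols in _import_structure.items():
#             if name in symbols:
#                 return getattr(importlib.import_module(module_name), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")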
| 340
| 1
|
"""simple docstring"""
import math
def proth(number: int) -> int:
    """Return the ``number``-th Proth number (3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        message = f"Input value of [number={number}] must be an integer"
        raise TypeError(message)

    if number < 1:
        message = f"Input value of [number={number}] must be > 0"
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Each "block" doubles the count of Proth numbers sharing the same power of two.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
A : Dict = 0
try:
A : Optional[Any] = proth(number)
except ValueError:
print(f"ValueError: there is no {number}th Proth number")
continue
print(f"The {number}th Proth number: {value}")
| 356
|
import torch
def main():
    """Report how many CUDA devices are visible to this process."""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 305
| 0
|
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase (or PascalCase when ``use_pascal`` is set)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
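    # Hedged sanity checks (illustrative inputs, not from the original module).
    assert snake_to_camel_case("some_random_string") == "someRandomString"
    assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"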
| 153
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 306
| 0
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )
    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 153
|
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def A_ ( _lowerCAmelCase : str="ro", _lowerCAmelCase : Optional[Any]="en", _lowerCAmelCase : Union[str, Any]="wmt16", _lowerCAmelCase : int=None ):
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
_a = f'{src_lang}-{tgt_lang}'
print(f'Converting {dataset}-{pair}' )
_a = datasets.load_dataset(_lowerCAmelCase, _lowerCAmelCase )
if save_dir is None:
_a = f'{dataset}-{pair}'
_a = Path(_lowerCAmelCase )
save_dir.mkdir(exist_ok=_lowerCAmelCase )
for split in ds.keys():
print(f'Splitting {split} with {ds[split].num_rows} records' )
# to save to val.source, val.target like summary datasets
_a = '''val''' if split == '''validation''' else split
_a = save_dir.joinpath(f'{fn}.source' )
_a = save_dir.joinpath(f'{fn}.target' )
_a = src_path.open('''w+''' )
_a = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
_a = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
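

# Hedged usage sketch (assumes this file is saved as download_wmt.py and that the
# `datasets` and `fire` packages are installed):
#
#     python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en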
| 153
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 8
|
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 8
| 1
|
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Return the least row length for which the fill-count function first exceeds one million."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 365
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 29
| 0
|
def solution(limit: int = 50_000_000) -> int:
    """Count the numbers below ``limit`` expressible as a prime square + prime cube + prime fourth power."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers up to sqrt(limit - 24).
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    # Iterate in ascending order so the early `break`s below are valid.
    sorted_primes = sorted(primes)
    for prime_1 in sorted_primes:
        square = prime_1 * prime_1
        for prime_2 in sorted_primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in sorted_primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(f'''{solution() = }''')
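    # Hedged sanity check: the Project Euler 87 statement lists exactly four such numbers below fifty.
    assert solution(50) == 4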
| 233
|
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 233
| 1
|
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class LoggingTester(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)
    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()
    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed
    def test_advisory_warnings(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 354
|
"""simple docstring"""
def counting_sort(collection):
    """Stable counting sort for a collection of integers."""
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
lowercase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase__ = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted))
| 12
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66
|
"""simple docstring"""
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 191
| 0
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
_SCREAMING_SNAKE_CASE : Any = "src/diffusers"
# Matches is_xxx_available()
_SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
_SCREAMING_SNAKE_CASE : int = "\n{0} = None\n"
_SCREAMING_SNAKE_CASE : Union[str, Any] = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
_SCREAMING_SNAKE_CASE : int = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 371
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 92
| 0
|
def binary_multiply(a: int, b: int) -> int:
    """Multiply ``a`` by ``b`` using the binary (double-and-add) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply ``a`` by ``b`` modulo ``modulus``, keeping intermediate values small."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus

        a += a
        b >>= 1

    return res
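

# Hedged sanity checks (illustrative inputs, not from the original module).
assert binary_multiply(2, 9) == 18
assert binary_mod_multiply(3, 4, 5) == (3 * 4) % 5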
| 35
|
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image by shifting every channel value by ``level``."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
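

# Hedged arithmetic check (illustrative): the point map 128 + level + (c - 128) is just
# c + level, before PIL clips the result into the valid 0-255 channel range.
assert 128 + 100 + (150 - 128) == 150 + 100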
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
| 35
| 1
|
def solution(n: int = 1_000) -> int:
    """Return the sum of all multiples of 3 or 5 strictly below ``n``."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 371
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
def lowerCAmelCase ( self : Tuple )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = CLIPConfig()
# Create a dummy config file with image_proceesor_type
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case = AutoImageProcessor.from_pretrained(__snake_case ).to_dict()
config_dict.pop("""image_processor_type""" )
snake_case = CLIPImageProcessor(**__snake_case )
# save in new folder
model_config.save_pretrained(__snake_case )
config.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
# make sure private variable is not incorrectly saved
snake_case = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> Dict:
with self.assertRaisesRegex(
__snake_case , """clip-base is not a local folder and is not a valid model identifier""" ):
snake_case = AutoImageProcessor.from_pretrained("""clip-base""" )
def lowerCAmelCase ( self : Tuple )-> int:
with self.assertRaisesRegex(
__snake_case , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
snake_case = AutoImageProcessor.from_pretrained(__snake_case , revision="""aaaaaa""" )
def lowerCAmelCase ( self : str )-> Union[str, Any]:
with self.assertRaisesRegex(
__snake_case , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase ( self : List[str] )-> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case , trust_remote_code=__snake_case )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def lowerCAmelCase ( self : List[str] )-> Dict:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoImageProcessor.register(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Dict )-> Optional[int]:
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = True
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(__snake_case , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
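# These tests exercise AutoImageProcessor's resolution order: an explicit
# `image_processor_type` key, then the legacy `feature_extractor_type` key,
# then the model config. They can be run with pytest (path illustrative):
#   python -m pytest tests/models/auto/test_image_processing_auto.py -q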
'''simple docstring'''
def solution() -> str:
    """Return the last ten digits of 1^1 + 2^2 + ... + 1000^1000 (Project Euler problem 48)."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
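# Sanity check on a smaller bound: 1^1 + 2^2 + ... + 10^10 = 10405071317,
# so the analogous sum over range(1, 11) ends in "0405071317".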
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


# convert to bit representations and back
def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor ranging from 0 to 1, outputs a bit tensor ranging from -1 to 1."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects bits from -1 to 1, outputs an image tensor from 0 to 1."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
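# Round-trip sketch (assumption: values are quantized to 8 bits, so the
# round-trip reproduces x up to 1/255 quantization error):
#   x = torch.rand(1, 3, 8, 8)
#   assert (bits_to_decimal(decimal_to_bits(x)) - x).abs().max() <= 1 / 255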
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # bind the bit-aware step function above as an instance method of the scheduler
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        ).__get__(scheduler)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
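# Minimal usage sketch (hypothetical: a pixel-space UNet whose in_channels equals
# 3 * BITS = 24 bit-planes would be required; all names below are illustrative):
#   unet = UNet2DConditionModel(sample_size=64, in_channels=24, out_channels=24)
#   pipe = BitDiffusion(unet, DDIMScheduler(), bit_scale=1.0)
#   image = pipe(height=64, width=64, num_inference_steps=50).images[0]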
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
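# Note on the sharding pattern above: `replicate(params)` copies the weights to
# every local device, `shard(...)` splits the batch dimension across devices for
# the pmapped (`jit=True`) call, and `jax.random.split(rng, jax.device_count())`
# gives each device its own PRNG key so samples differ per device.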
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
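# Hypothetical usage sketch: pop and warn about a deprecated keyword argument.
#   def run(**kwargs):
#       scale = deprecate("scale", "1.0.0", "Use `guidance_scale` instead.", take_from=kwargs)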
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
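# Illustrative example of the MAPPING lookup above: a fairseq key such as
# "encoder.layers.3.self_attn.linear_q.weight" matches "self_attn.linear_q",
# the "*" in the mapped key is replaced by the layer index ("3"), and the tensor
# lands in "wav2vec2_conformer.encoder.layers.3.self_attn.linear_q" with
# weight_type "weight".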
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
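# Example invocation (paths are placeholders; the script name is illustrative):
#   python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/conformer.pt --pytorch_dump_folder_path ./wav2vec2-conformer --not_finetuned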
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
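# With the lazy structure above, `import transformers.models.wav2vec2` is cheap:
# the heavy torch/TF/Flax modules are only imported when one of the listed
# attributes (e.g. `Wav2Vec2Model`) is actually accessed through the _LazyModule.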
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
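# Illustrative rename: "pretrained.model.blocks.0.attn.proj.weight" passes through
# the rules above and comes out as "dpt.encoder.layer.0.attention.output.dense.weight".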
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
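# Example invocation (the script name is illustrative; the checkpoint URL reuses
# the parser's default from above):
#   python convert_dpt_checkpoint.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large --show_prediction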
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
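# This dummy stands in for the real class when `note_seq` (or torch/transformers)
# is missing: instantiating it raises an error via requires_backends that tells
# the user which backend to install, instead of failing at import time.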