code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import os
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
with open(os.path.dirname(SCREAMING_SNAKE_CASE_ ) + "/p022_names.txt" ) as file:
SCREAMING_SNAKE_CASE_ : List[Any] = str(file.readlines()[0] )
SCREAMING_SNAKE_CASE_ : List[str] = names.replace("\"" , "" ).split("," )
names.sort()
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
for i, name in enumerate(SCREAMING_SNAKE_CASE_ ):
for letter in name:
name_score += ord(SCREAMING_SNAKE_CASE_ ) - 6_4
total_score += (i + 1) * name_score
SCREAMING_SNAKE_CASE_ : int = 0
return total_score
if __name__ == "__main__":
print(solution())
| 68 |
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = str(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) == 9 and set(SCREAMING_SNAKE_CASE_ ) == set("123456789" )
def __lowerCamelCase ( ) -> int | None:
"""simple docstring"""
for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
SCREAMING_SNAKE_CASE_ : int = 1_0_0_0_0_2 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
for base_num in range(3_3_3 , 9_9 , -1 ):
SCREAMING_SNAKE_CASE_ : List[str] = 1_0_0_2_0_0_3 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
return None
if __name__ == "__main__":
print(F'''{solution() = }''')
| 68 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Tuple = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=SCREAMING_SNAKE_CASE_ )
env_command_parser(subparsers=SCREAMING_SNAKE_CASE_ )
launch_command_parser(subparsers=SCREAMING_SNAKE_CASE_ )
tpu_command_parser(subparsers=SCREAMING_SNAKE_CASE_ )
test_command_parser(subparsers=SCREAMING_SNAKE_CASE_ )
# Let's go
SCREAMING_SNAKE_CASE_ : Tuple = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE_ , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 68 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = True , lowercase__ = None , lowercase__ = False , lowercase__ = None , lowercase__ = True , lowercase__ = "arrow" , **lowercase__ , ):
"""simple docstring"""
super().__init__(
split=lowercase__ , features=lowercase__ , cache_dir=lowercase__ , keep_in_memory=lowercase__ , streaming=lowercase__ , **lowercase__ , )
SCREAMING_SNAKE_CASE_ : Any = load_from_cache_file
SCREAMING_SNAKE_CASE_ : Optional[int] = file_format
SCREAMING_SNAKE_CASE_ : List[Any] = Spark(
df=lowercase__ , features=lowercase__ , cache_dir=lowercase__ , working_dir=lowercase__ , **lowercase__ , )
def __lowerCamelCase ( self ):
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
SCREAMING_SNAKE_CASE_ : Optional[int] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=lowercase__ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 68 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case_ = {
'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['VisionTextDualEncoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['FlaxVisionTextDualEncoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['TFVisionTextDualEncoderModel']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 68 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
snake_case_ = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
snake_case_ = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = 
datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
snake_case_ = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def __lowerCamelCase ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=False , ):
"""simple docstring"""
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([re.sub(lowercase__ , "" , lowercase__ ) for x in predictions] )
SCREAMING_SNAKE_CASE_ : List[Any] = np.array([re.sub(lowercase__ , "" , lowercase__ ) for x in references] )
else:
SCREAMING_SNAKE_CASE_ : int = np.asarray(lowercase__ )
SCREAMING_SNAKE_CASE_ : Any = np.asarray(lowercase__ )
if ignore_case:
SCREAMING_SNAKE_CASE_ : Dict = np.char.lower(lowercase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = np.char.lower(lowercase__ )
if ignore_punctuation:
SCREAMING_SNAKE_CASE_ : Optional[int] = string.punctuation.maketrans("" , "" , string.punctuation )
SCREAMING_SNAKE_CASE_ : int = np.char.translate(lowercase__ , table=lowercase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.char.translate(lowercase__ , table=lowercase__ )
if ignore_numbers:
SCREAMING_SNAKE_CASE_ : Optional[int] = string.digits.maketrans("" , "" , string.digits )
SCREAMING_SNAKE_CASE_ : Dict = np.char.translate(lowercase__ , table=lowercase__ )
SCREAMING_SNAKE_CASE_ : int = np.char.translate(lowercase__ , table=lowercase__ )
SCREAMING_SNAKE_CASE_ : str = predictions == references
return {"exact_match": np.mean(lowercase__ ) * 100}
| 68 | 1 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int ) -> list[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = [0] * no_of_processes
SCREAMING_SNAKE_CASE_ : List[Any] = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = burst_time[i]
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : List[str] = 0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 9_9_9_9_9_9_9_9_9
SCREAMING_SNAKE_CASE_ : str = 0
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(SCREAMING_SNAKE_CASE_ ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
SCREAMING_SNAKE_CASE_ : Tuple = remaining_time[j]
SCREAMING_SNAKE_CASE_ : List[Any] = j
SCREAMING_SNAKE_CASE_ : List[Any] = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
SCREAMING_SNAKE_CASE_ : Optional[int] = remaining_time[short]
if minm == 0:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 9_9_9_9_9_9_9_9_9
if remaining_time[short] == 0:
complete += 1
SCREAMING_SNAKE_CASE_ : Optional[int] = False
# Find finish time of current process
SCREAMING_SNAKE_CASE_ : Optional[Any] = increment_time + 1
# Calculate waiting time
SCREAMING_SNAKE_CASE_ : Any = finish_time - arrival_time[short]
SCREAMING_SNAKE_CASE_ : List[Any] = finar - burst_time[short]
if waiting_time[short] < 0:
SCREAMING_SNAKE_CASE_ : str = 0
# Increment time
increment_time += 1
return waiting_time
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[int] ) -> list[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [0] * no_of_processes
for i in range(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ : int = burst_time[i] + waiting_time[i]
return turn_around_time
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 0
SCREAMING_SNAKE_CASE_ : List[str] = 0
for i in range(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ : Any = total_waiting_time + waiting_time[i]
SCREAMING_SNAKE_CASE_ : Dict = total_turn_around_time + turn_around_time[i]
print(F"Average waiting time = {total_waiting_time / no_of_processes:.5f}" )
print("Average turn around time =" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('Enter how many process you want to analyze')
snake_case_ = int(input())
snake_case_ = [0] * no_of_processes
snake_case_ = [0] * no_of_processes
snake_case_ = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('Enter the arrival time and burst time for process:--' + str(i + 1))
snake_case_ , snake_case_ = map(int, input().split())
snake_case_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
snake_case_ = burst_time
snake_case_ = no_of_processes
snake_case_ = waiting_time
snake_case_ = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
snake_case_ = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs)
| 68 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ = logging.get_logger(__name__)
snake_case_ = {'vocab_file': 'spiece.model'}
snake_case_ = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
snake_case_ = {
'AI-Sweden/gpt-sw3-126m': 2_0_4_8,
'AI-Sweden/gpt-sw3-350m': 2_0_4_8,
'AI-Sweden/gpt-sw3-1.6b': 2_0_4_8,
'AI-Sweden/gpt-sw3-6.7b': 2_0_4_8,
'AI-Sweden/gpt-sw3-20b': 2_0_4_8,
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ["input_ids", "attention_mask"]
def __init__( self , lowercase__ , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__ = None , **lowercase__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
SCREAMING_SNAKE_CASE_ : Dict = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
SCREAMING_SNAKE_CASE_ : str = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
SCREAMING_SNAKE_CASE_ : List[Any] = "<|endoftext|>" if eos_token is None else eos_token
SCREAMING_SNAKE_CASE_ : Dict = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
SCREAMING_SNAKE_CASE_ : Tuple = unk_token if pad_token is None else pad_token
SCREAMING_SNAKE_CASE_ : Optional[Any] = eos_token if bos_token is None else bos_token
else:
SCREAMING_SNAKE_CASE_ : int = "<pad>" if pad_token is None else pad_token
SCREAMING_SNAKE_CASE_ : Any = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=lowercase__ , remove_space=lowercase__ , keep_accents=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , **lowercase__ , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_lower_case
SCREAMING_SNAKE_CASE_ : Optional[int] = remove_space
SCREAMING_SNAKE_CASE_ : int = keep_accents
SCREAMING_SNAKE_CASE_ : List[Any] = vocab_file
SCREAMING_SNAKE_CASE_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase__ )
# Used for whitespace normalization in input texts
# fmt : off
SCREAMING_SNAKE_CASE_ : int = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(
F"[{''.join(map(lowercase__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]" )
def __getstate__( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Dict = None
return state
def __setstate__( self , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __lowerCamelCase ( self ):
"""simple docstring"""
return len(self.sp_model )
def __lowerCamelCase ( self , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.non_printing_characters_re.sub("" , lowercase__ )
# Normalize whitespaces
SCREAMING_SNAKE_CASE_ : List[str] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
SCREAMING_SNAKE_CASE_ : List[Any] = unicodedata.normalize("NFC" , lowercase__ )
return text
def __lowerCamelCase ( self , lowercase__ , **lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.preprocess_text(lowercase__ )
return self.sp_model.encode(lowercase__ , out_type=lowercase__ )
def __lowerCamelCase ( self , lowercase__ ):
"""simple docstring"""
return self.sp_model.PieceToId(lowercase__ )
def __lowerCamelCase ( self , lowercase__ ):
"""simple docstring"""
return self.sp_model.IdToPiece(lowercase__ )
@staticmethod
def __lowerCamelCase ( lowercase__ ):
"""simple docstring"""
return out_string
def __lowerCamelCase ( self , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
SCREAMING_SNAKE_CASE_ : Any = ""
SCREAMING_SNAKE_CASE_ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase__ ) + token
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : int = []
else:
current_sub_tokens.append(lowercase__ )
SCREAMING_SNAKE_CASE_ : List[str] = False
out_string += self.sp_model.decode(lowercase__ )
return out_string
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCamelCase ( self , lowercase__ , lowercase__ = None ):
"""simple docstring"""
if not os.path.isdir(lowercase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ : Any = os.path.join(
lowercase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ , "wb" ) as fi:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
def __lowerCamelCase ( self , lowercase__ , lowercase__ = False ):
"""simple docstring"""
if isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.preprocess_text(lowercase__ )
SCREAMING_SNAKE_CASE_ : Any = self.sp_model.encode(lowercase__ )
else:
SCREAMING_SNAKE_CASE_ : str = [self.preprocess_text(lowercase__ ) for t in text]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.sp_model.encode(lowercase__ )
if return_tensors is True or return_tensors == "pt":
SCREAMING_SNAKE_CASE_ : str = torch.tensor(lowercase__ )
return token_ids
def __lowerCamelCase ( self , lowercase__ ):
"""simple docstring"""
return self.sp_model.decode(lowercase__ )
def __lowerCamelCase ( self , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [F"User: {text}" if is_user else F"Bot: {text}" for is_user, text in conversation.iter_texts()]
SCREAMING_SNAKE_CASE_ : List[str] = (
F"{self.eos_token}{self.bos_token}" + F"{self.bos_token}".join(lowercase__ ) + F"{self.bos_token}Bot:"
)
return self.encode(text=lowercase__ )
| 68 | 1 |
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : list[int] ) -> bool:
"""simple docstring"""
return len(set(SCREAMING_SNAKE_CASE_ ) ) == len(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 68 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
snake_case_ = True
except (ImportError, ModuleNotFoundError):
snake_case_ = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str ) -> str:
"""simple docstring"""
re.sub("<n>" , "" , SCREAMING_SNAKE_CASE_ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE_ ) )
| 68 | 1 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
snake_case_ = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
snake_case_ = {
'facebook/blenderbot_small-90M': 5_1_2,
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = BlenderbotSmallTokenizer
def __init__( self , lowercase__=None , lowercase__=None , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__=False , lowercase__=True , **lowercase__ , ):
"""simple docstring"""
super().__init__(
ByteLevelBPETokenizer(
vocab=lowercase__ , merges=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ , ) , bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , **lowercase__ , )
SCREAMING_SNAKE_CASE_ : Dict = add_prefix_space
def __lowerCamelCase ( self , lowercase__ , lowercase__=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self , lowercase__ , lowercase__ = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 68 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self , lowercase__ , lowercase__=2 , lowercase__=3 , lowercase__=4 , lowercase__=2 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=99 , lowercase__=36 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=16 , lowercase__=2 , lowercase__=0.02 , lowercase__=6 , lowercase__=6 , lowercase__=3 , lowercase__=4 , lowercase__=None , lowercase__=1000 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : Dict = num_channels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = patch_size
SCREAMING_SNAKE_CASE_ : str = is_training
SCREAMING_SNAKE_CASE_ : str = use_input_mask
SCREAMING_SNAKE_CASE_ : Any = use_token_type_ids
SCREAMING_SNAKE_CASE_ : int = use_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE_ : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Any = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : str = hidden_act
SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Dict = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Tuple = initializer_range
SCREAMING_SNAKE_CASE_ : List[str] = coordinate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = shape_size
SCREAMING_SNAKE_CASE_ : List[str] = num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = num_choices
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scope
SCREAMING_SNAKE_CASE_ : Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_seq_length
SCREAMING_SNAKE_CASE_ : Tuple = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE_ : Optional[int] = self.text_seq_length + self.image_seq_length
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
SCREAMING_SNAKE_CASE_ : Dict = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 3]
SCREAMING_SNAKE_CASE_ : str = bbox[i, j, 1]
SCREAMING_SNAKE_CASE_ : Dict = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE_ : List[Any] = bbox[i, j, 2]
SCREAMING_SNAKE_CASE_ : Dict = bbox[i, j, 0]
SCREAMING_SNAKE_CASE_ : Tuple = tmp_coordinate
SCREAMING_SNAKE_CASE_ : Dict = tf.constant(lowercase__ )
SCREAMING_SNAKE_CASE_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ : str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFLayoutLMvaModel(config=lowercase__ )
# text + image
SCREAMING_SNAKE_CASE_ : int = model(lowercase__ , pixel_values=lowercase__ , training=lowercase__ )
SCREAMING_SNAKE_CASE_ : str = model(
lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , training=lowercase__ , )
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , training=lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE_ : Tuple = model(lowercase__ , training=lowercase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE_ : int = model({"pixel_values": pixel_values} , training=lowercase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFLayoutLMvaForSequenceClassification(config=lowercase__ )
SCREAMING_SNAKE_CASE_ : List[str] = model(
lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , training=lowercase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFLayoutLMvaForTokenClassification(config=lowercase__ )
SCREAMING_SNAKE_CASE_ : int = model(
lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , training=lowercase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = 2
SCREAMING_SNAKE_CASE_ : List[Any] = TFLayoutLMvaForQuestionAnswering(config=lowercase__ )
SCREAMING_SNAKE_CASE_ : int = model(
lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , training=lowercase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_)) : Any = config_and_inputs
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase,_UpperCAmelCase,unittest.TestCase ):
    # NOTE(review): this class is machine-mangled and cannot run as written:
    #  * the single attribute ``_A`` is rebound five times (upstream these are
    #    distinct attributes: all_model_classes, pipeline_model_mapping and
    #    three boolean test flags);
    #  * several ``def`` headers declare the parameter name ``lowercase__``
    #    more than once — a SyntaxError;
    #  * annotated tuple targets such as ``a, b : int = ...`` are SyntaxErrors;
    #  * many statements read names that are never bound (lowercase__, config,
    #    inputs_dict, prepared_for_class, ...), and ``tf.intaa`` is presumably
    #    a mangling of ``tf.int32``.
    # Restore from the upstream LayoutLMv3 TF test before executing.
    _A = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    _A = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    _A = False
    _A = False
    _A = False
    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Whether to skip the given pipeline test combination (always skip)."""
        return True
    # NOTE(review): duplicate parameter names above and below — SyntaxError.
    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__=False ):
        """Copy the common inputs for ``model_class``; optionally add labels of
        the right dtype/shape for the task-specific heads."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = copy.deepcopy(lowercase__ )
        if model_class in get_values(lowercase__ ):
            SCREAMING_SNAKE_CASE_ : str = {
                k: tf.tile(tf.expand_dims(lowercase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(lowercase__ , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(lowercase__ ):
                SCREAMING_SNAKE_CASE_ : Tuple = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(lowercase__ ):
                SCREAMING_SNAKE_CASE_ : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                SCREAMING_SNAKE_CASE_ : List[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(lowercase__ ):
                SCREAMING_SNAKE_CASE_ : List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(lowercase__ ):
                SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict
    def __lowerCamelCase ( self ):
        """Set up the model tester and the config tester."""
        SCREAMING_SNAKE_CASE_ : List[str] = TFLayoutLMvaModelTester(self )
        SCREAMING_SNAKE_CASE_ : int = ConfigTester(self , config_class=lowercase__ , hidden_size=37 )
    def __lowerCamelCase ( self ):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()
    def __lowerCamelCase ( self ):
        """Check hf_compute_loss: loss shape with kwargs, masked labels, a dict
        and a tuple of inputs."""
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE_ : int = model_class(lowercase__ )
            if getattr(lowercase__ , "hf_compute_loss" , lowercase__ ):
                # The number of elements in the loss should be the same as the number of elements in the label
                SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(inputs_dict.copy() , lowercase__ , return_labels=lowercase__ )
                SCREAMING_SNAKE_CASE_ : List[Any] = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowercase__ )[0]
                ]
                SCREAMING_SNAKE_CASE_ : Any = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(inputs_dict.copy() , lowercase__ , return_labels=lowercase__ )
                SCREAMING_SNAKE_CASE_ : List[str] = prepared_for_class.pop("input_ids" )
                SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase__ , **lowercase__ )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                SCREAMING_SNAKE_CASE_ : List[Any] = self._prepare_for_class(inputs_dict.copy() , lowercase__ , return_labels=lowercase__ )
                SCREAMING_SNAKE_CASE_ : int = prepared_for_class.pop("input_ids" )
                if "labels" in prepared_for_class:
                    SCREAMING_SNAKE_CASE_ : str = prepared_for_class["labels"].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        # -100 is the conventional "ignore" index for the loss
                        SCREAMING_SNAKE_CASE_ : str = -100
                        SCREAMING_SNAKE_CASE_ : str = tf.convert_to_tensor(lowercase__ )
                        SCREAMING_SNAKE_CASE_ : Optional[int] = model(lowercase__ , **lowercase__ )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                SCREAMING_SNAKE_CASE_ : List[Any] = self._prepare_for_class(inputs_dict.copy() , lowercase__ , return_labels=lowercase__ )
                SCREAMING_SNAKE_CASE_ : Optional[int] = model(lowercase__ )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                SCREAMING_SNAKE_CASE_ : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowercase__ , return_labels=lowercase__ )
                # Get keys that were added with the _prepare_for_class function
                SCREAMING_SNAKE_CASE_ : int = prepared_for_class.keys() - inputs_dict.keys()
                SCREAMING_SNAKE_CASE_ : Optional[int] = inspect.signature(model.call ).parameters
                SCREAMING_SNAKE_CASE_ : Tuple = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                SCREAMING_SNAKE_CASE_ : List[Any] = {0: "input_ids"}
                for label_key in label_keys:
                    SCREAMING_SNAKE_CASE_ : Optional[int] = signature_names.index(lowercase__ )
                    SCREAMING_SNAKE_CASE_ : List[Any] = label_key
                SCREAMING_SNAKE_CASE_ : List[str] = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                SCREAMING_SNAKE_CASE_ : List[str] = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    SCREAMING_SNAKE_CASE_ : List[str] = prepared_for_class[value]
                SCREAMING_SNAKE_CASE_ : List[Any] = tuple(lowercase__ )
                # Send to model
                SCREAMING_SNAKE_CASE_ : int = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def __lowerCamelCase ( self ):
        """Run the base-model shape checks."""
        (
            (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ),
        ) : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
    def __lowerCamelCase ( self ):
        """Run the base-model checks for each position-embedding type."""
        (
            (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ),
        ) : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            SCREAMING_SNAKE_CASE_ : List[str] = type
            self.model_tester.create_and_check_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
    def __lowerCamelCase ( self ):
        """Run the sequence-classification head check."""
        (
            (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ),
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
    def __lowerCamelCase ( self ):
        """Run the token-classification head check."""
        (
            (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ),
        ) : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
    def __lowerCamelCase ( self ):
        """Run the question-answering head check."""
        (
            (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ),
        ) : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
    @slow
    def __lowerCamelCase ( self ):
        """Smoke-test loading the pretrained checkpoint (network access; slow)."""
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFLayoutLMvaModel.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
def __lowerCamelCase ( ):
    """Load the standard COCO cats fixture image used by the integration test.

    Bug fix: the opened image was bound to a throwaway name while the
    undefined name ``image`` was returned (NameError). The junk return
    annotation (``Optional`` is not imported here) was dropped.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test: run the pretrained LayoutLMv3 base checkpoint on
    a fixture image and compare a slice of the final hidden state against
    reference values.
    """

    @cached_property
    def default_image_processor( self ):
        """Image processor used by the integration test, with OCR disabled.

        Bug fixes: the property was defined under a mangled name while the
        test below reads ``self.default_image_processor``; and ``apply_ocr``
        was passed an unbound name — upstream uses apply_ocr=False
        (TODO confirm).
        """
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None

    @slow
    def __lowerCamelCase ( self ):
        """Forward pass of microsoft/layoutlmv3-base; checks the output shape
        and a 3x3 slice of the last hidden state (atol=1e-4)."""
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
        image_processor = self.default_image_processor
        # NOTE(review): ``prepare_img`` is defined in this file under a mangled
        # name, so this call does not resolve as written.
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="tf" ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass (training=False: the mangled code passed an unbound name)
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
| 68 | 1 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
snake_case_ = re.compile(R'\s+')
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : dict ) -> dict:
    """Return {"hash": md5-hex} of the example's content with all whitespace
    removed, so whitespace-only variants hash identically.

    Bug fixes: ``hashlib`` has no ``mda`` attribute (AttributeError) — the
    intended digest is md5; and the example dict itself was passed to
    ``re.sub`` as the pattern. The whitespace pattern is inlined here so the
    function is self-contained.
    """
    stripped = re.sub(r"\s+" , "" , SCREAMING_SNAKE_CASE_["content"] )
    return {"hash": hashlib.md5(stripped.encode("utf-8" ) ).hexdigest()}
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : dict ) -> dict:
    """Return the mean and maximum line length of the example's content.

    Bug fix: the comprehension measured ``len(<example dict>)`` (the number of
    keys) instead of the length of each line; the otherwise-unused loop
    variable ``line`` shows the intent.
    """
    line_lengths = [len(line ) for line in SCREAMING_SNAKE_CASE_["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : dict ) -> dict:
    """Return the fraction of alphanumeric characters in the example's content.

    Bug fix: the body read the undefined name ``example`` instead of the
    function's parameter (NameError).
    """
    alpha_frac = np.mean([c.isalnum() for c in SCREAMING_SNAKE_CASE_["content"]] )
    return {"alpha_frac": alpha_frac}
def __lowerCamelCase ( example , uniques ):
    """Return True iff the example's hash is still in ``uniques``.

    Side effect: the hash is removed from ``uniques`` on first sight, so any
    later example with the same hash is reported as a duplicate.

    Bug fix: the original header declared both parameters under the same
    mangled name (a SyntaxError); names restored from the body's references.
    """
    if example["hash"] in uniques:
        uniques.remove(example["hash"] )
        return True
    else:
        return False
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any]=5 ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["auto-generated", "autogenerated", "automatically generated"]
SCREAMING_SNAKE_CASE_ : str = example["content"].splitlines()
for _, line in zip(range(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def __lowerCamelCase ( example , scan_width=5 , coeff=0.05 ):
    """Heuristically flag config/test files.

    Two tests: (1) an explicit marker keyword in the first ``scan_width``
    lines; (2) a density of "config"/"test" substrings exceeding
    ``int(coeff * number_of_newlines)``.

    Bug fix: the original header declared the first two parameters under the
    same mangled name (a SyntaxError); names restored from the body.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n" )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count("config" )
        count_test += line.lower().count("test" )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : dict ) -> dict:
    """Flag examples that contain none of the basic code keywords
    ("def ", "class ", "for ", "while ").

    Bug fix: the body read the undefined name ``example`` instead of the
    function's parameter (NameError).
    """
    keywords = ["def ", "class ", "for ", "while "]
    lines = SCREAMING_SNAKE_CASE_["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any]=4 ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = example["content"].splitlines()
SCREAMING_SNAKE_CASE_ : Dict = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : dict ) -> dict:
    """Return the characters-per-token ratio of the example's content, using
    the module-level ``tokenizer``.

    Bug fixes: the body read the undefined name ``example`` instead of the
    parameter, and passed the example dict as ``truncation``; the upstream
    script uses truncation=False — TODO confirm.
    """
    input_ids = tokenizer(SCREAMING_SNAKE_CASE_["content"] , truncation=False )["input_ids"]
    ratio = len(SCREAMING_SNAKE_CASE_["content"] ) / len(input_ids )
    return {"ratio": ratio}
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : dict ) -> dict:
    """Aggregate every per-example statistic and heuristic flag into one dict.

    NOTE(review): mangled — the dict is bound to a throwaway local while the
    updates read the unbound name ``results``, and the helpers called below
    (get_hash, line_stats, ...) are all defined in this file under the name
    ``__lowerCamelCase``, so none of these references resolve as written.
    Restore the helper names before running.
    """
    SCREAMING_SNAKE_CASE_ : List[Any] = {}
    results.update(get_hash(SCREAMING_SNAKE_CASE_ ) )
    results.update(line_stats(SCREAMING_SNAKE_CASE_ ) )
    results.update(alpha_stats(SCREAMING_SNAKE_CASE_ ) )
    results.update(char_token_ratio(SCREAMING_SNAKE_CASE_ ) )
    results.update(is_autogenerated(SCREAMING_SNAKE_CASE_ ) )
    results.update(is_config_or_test(SCREAMING_SNAKE_CASE_ ) )
    results.update(has_no_keywords(SCREAMING_SNAKE_CASE_ ) )
    results.update(has_few_assignments(SCREAMING_SNAKE_CASE_ ) )
    return results
def __lowerCamelCase ( example , uniques , args ):
    """Return True iff the example passes every quality filter.

    Mutates ``uniques`` via check_uniques (each hash is consumed on first
    sight). The config/test and no-keyword filters are probabilistic, gated
    by ``args.filter_proba``.

    Bug fix: the original header declared all three parameters under the same
    mangled name (a SyntaxError); names restored from the body's references.
    """
    if not check_uniques(example , uniques ):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ ):
    """Gzip-compress the file at the given path to ``<path>.gz`` (level 6)
    and delete the original file.

    Bug fix: ``shutil.copyfileobj`` was called with the path argument for
    both source and destination instead of the two open file objects, so no
    data was ever copied.
    """
    with open(SCREAMING_SNAKE_CASE_ , "rb" ) as f_in:
        with gzip.open(str(SCREAMING_SNAKE_CASE_ ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(SCREAMING_SNAKE_CASE_ )
# Settings
# NOTE(review): this script tail is machine-mangled: every assignment binds
# the throwaway name ``snake_case_`` while later statements read the original
# names (parser, args, tokenizer, ds, uniques, frac, ds_filter,
# duplicate_clusters, output_dir, data_dir, t_start, file_path, end_index),
# ``preprocess`` is undefined (the helper is mangled to ``__lowerCamelCase``)
# and ``ds.filter(filter, ...)`` passes the *builtin* ``filter``.  Nothing
# below runs as written; the inline comments describe the intended flow.
snake_case_ = HfArgumentParser(PreprocessingArguments)
snake_case_ = parser.parse_args()
if args.num_workers is None:
    snake_case_ = multiprocessing.cpu_count()
snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
snake_case_ = time.time()
snake_case_ = load_dataset(args.dataset_name, split='train')
print(F'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
snake_case_ = time.time()
snake_case_ = ds.map(preprocess, num_proc=args.num_workers)
print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
snake_case_ = set(ds.unique('hash'))
snake_case_ = len(uniques) / len(ds)
print(F'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
snake_case_ = time.time()
snake_case_ = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(F'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(F'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    snake_case_ = time.time()
    snake_case_ , snake_case_ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
    print(F'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
snake_case_ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / 'duplicate_clusters.json', 'w') as f:
        json.dump(duplicate_clusters, f)
snake_case_ = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
snake_case_ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    snake_case_ = str(data_dir / F'''file-{file_number+1:012}.json''')
    snake_case_ = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
| 68 |
'''simple docstring'''
def __lowerCamelCase ( k : int , n : int ) -> list:
    """Return the k-th (0-indexed) lexicographic permutation of range(n).

    Uses the factorial number system: each digit of k in factorial base picks
    the next element from the remaining pool.

    Bug fix: the original header declared both parameters under the same
    mangled name (a SyntaxError) and annotated them with unimported typing
    names; distinct names were restored from how the body uses them.
    """
    # factorials holds [1!, 2!, ..., (n-1)!] — the positional bases
    factorials = [1]
    for i in range(2 , k if False else n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Peel off one factorial-base digit per remaining position
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 68 | 1 |
'''simple docstring'''
from math import factorial
class SCREAMING_SNAKE_CASE__ :
    """Truncated dual / hyper-dual number ``real + duals[0]*E + duals[1]*E**2 + ...``
    used for forward-mode automatic differentiation. ``duals[k]`` stores the
    coefficient of E**(k+1).

    Bug fixes vs. the mangled original: ``__init__`` declared both parameters
    under one name (SyntaxError); every ``isinstance`` call passed the same
    object twice; the constructor was referenced through the undefined name
    ``Dual`` (a module-level alias is added after the class); ``__repr__``
    printed the wrong name. ``__radd__``/``__rmul__`` are restored so
    ``number op dual`` works.
    """

    def __init__( self , real , rank ):
        # ``rank``: an int creates that many dual components, each initialised
        # to 1 (the upstream convention); otherwise it is taken as an explicit
        # coefficient list.
        self.real = real
        if isinstance(rank , int ):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__( self ):
        # Render each coefficient as "<dual>E<power>", e.g. "1+2E1".
        return (
            F"{self.real}+"
            F"{'+'.join(str(dual )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"
        )

    def __lowerCamelCase ( self ):
        """Return a copy with trailing zero dual coefficients dropped.

        NOTE(review): raises IndexError when every coefficient is zero —
        upstream quirk, preserved.
        """
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1 )
        return Dual(self.real , cur )

    def __add__( self , other ):
        if not isinstance(other , SCREAMING_SNAKE_CASE__ ):
            return Dual(self.real + other , self.duals )
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # NOTE(review): padding the shorter list with 1s (not 0s) matches the
        # upstream algorithm, odd as it looks.
        if len(s_dual ) > len(o_dual ):
            o_dual.extend([1] * (len(s_dual ) - len(o_dual )) )
        elif len(s_dual ) < len(o_dual ):
            s_dual.extend([1] * (len(o_dual ) - len(s_dual )) )
        new_duals = [s_dual[i] + o_dual[i] for i in range(len(s_dual ) )]
        return Dual(self.real + other.real , new_duals )

    _A = __add__
    __radd__ = __add__

    def __sub__( self , other ):
        return self + other * -1

    def __mul__( self , other ):
        if not isinstance(other , SCREAMING_SNAKE_CASE__ ):
            new_duals = [d * other for d in self.duals]
            return Dual(self.real * other , new_duals )
        # Polynomial product in E: slot k holds the coefficient of E**(k+1),
        # so the product of components i and j lands at index i + j + 1.
        new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real , new_duals )

    _A = __mul__
    __rmul__ = __mul__

    def __truediv__( self , other ):
        if not isinstance(other , SCREAMING_SNAKE_CASE__ ):
            new_duals = [d / other for d in self.duals]
            return Dual(self.real / other , new_duals )
        raise ValueError

    def __floordiv__( self , other ):
        if not isinstance(other , SCREAMING_SNAKE_CASE__ ):
            new_duals = [d // other for d in self.duals]
            return Dual(self.real // other , new_duals )
        raise ValueError

    def __pow__( self , n ):
        # Only non-negative integer powers are supported.
        if n < 0 or isinstance(n , float ):
            raise ValueError("power must be a positive integer" )
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1 ):
            x *= self
        return x


# Restore the name the rest of this file uses for the class.
Dual = SCREAMING_SNAKE_CASE__
def __lowerCamelCase ( func , position , order ):
    """Return the ``order``-th derivative of ``func`` at ``position`` via
    forward-mode automatic differentiation with dual numbers.

    Bug fix: the original header declared all three parameters under the same
    mangled name (a SyntaxError) and carried unimported typing annotations;
    names were restored from the validation messages below.
    """
    if not callable(func ):
        raise ValueError("differentiate() requires a function as input for func" )
    if not isinstance(position , (float, int) ):
        raise ValueError("differentiate() requires a float as input for position" )
    if not isinstance(order , int ):
        raise ValueError("differentiate() requires an int as input for order" )
    # Seed a rank-1 dual number and propagate it through func.
    d = Dual(position , 1 )
    result = func(d )
    if order == 0:
        return result.real
    # duals[order-1] is the E**order coefficient, i.e. f^(order)/order!.
    return result.duals[order - 1] * factorial(order )
# NOTE(review): mangled demo block — the nested function's parameter is a
# throwaway name while the body reads ``y`` (NameError), its annotations
# reference the unimported ``Union``/``Any``, and ``differentiate``/``f`` are
# not defined under those names in this file.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]:
        """Example function y**6 whose 2nd derivative is computed below."""
        return y**2 * y**4
    print(differentiate(f, 9, 2))
| 68 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # NOTE(review): mangled script — the ArgumentParser is bound to
    # ``snake_case_`` while every add_argument call below reads the unbound
    # name ``parser``, so this block raises NameError as written.
    snake_case_ = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    parser.add_argument(
        '--original_config_file',
        type=str,
        required=True,
        help='The YAML config file corresponding to the original architecture.',
    )
    parser.add_argument(
        '--num_in_channels',
        default=None,
        type=int,
        help='The number of input channels. If `None` number of input channels will be automatically inferred.',
    )
    parser.add_argument(
        '--image_size',
        default=5_1_2,
        type=int,
        help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
            ' Base. Use 768 for Stable Diffusion v2.'
        ),
    )
    parser.add_argument(
        '--extract_ema',
        action='store_true',
        help=(
            'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
            ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
            ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
        ),
    )
    parser.add_argument(
        '--upcast_attention',
        action='store_true',
        help=(
            'Whether the attention computation should always be upcasted. This is necessary when running stable'
            ' diffusion 2.1.'
        ),
    )
    parser.add_argument(
        '--from_safetensors',
        action='store_true',
        help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
    )
    parser.add_argument(
        '--to_safetensors',
        action='store_true',
        help='Whether to store pipeline in safetensors format or not.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str ) -> bool:
    """Parse the literal strings 'True'/'False' into a bool; raise ValueError
    for anything else (used as an argparse ``type=`` converter).

    Bug fixes: the original annotations referenced ``Optional``/``Dict``,
    which this script never imports, so the ``def`` statement itself raised
    NameError; and the error message read the undefined name ``string``
    instead of the parameter.
    """
    if SCREAMING_SNAKE_CASE_ == "True":
        return True
    elif SCREAMING_SNAKE_CASE_ == "False":
        return False
    else:
        raise ValueError(F"could not parse string as bool {SCREAMING_SNAKE_CASE_}" )
# NOTE(review): mangled — ``parser`` and ``args`` are never bound (everything
# above is assigned to ``snake_case_``), ``type=parse_bool`` references a
# function this file defines under a mangled name, and the download result is
# bound to ``snake_case_`` while the save call reads ``controlnet``.
parser.add_argument(
    '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
snake_case_ = parser.parse_args()
snake_case_ = download_controlnet_from_original_ckpt(
    checkpoint_path=args.checkpoint_path,
    original_config_file=args.original_config_file,
    image_size=args.image_size,
    extract_ema=args.extract_ema,
    num_in_channels=args.num_in_channels,
    upcast_attention=args.upcast_attention,
    from_safetensors=args.from_safetensors,
    device=args.device,
    use_linear_projection=args.use_linear_projection,
    cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 68 | 1 |
'''simple docstring'''
import numpy as np
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : np.ndarray ) -> np.ndarray:
    """Logistic sigmoid 1 / (1 + e^-x), applied element-wise."""
    exp_of_negated = np.exp(-SCREAMING_SNAKE_CASE_ )
    return 1 / (1 + exp_of_negated )
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : np.ndarray ) -> np.ndarray:
    """Sigmoid linear unit (SiLU / swish): x * sigmoid(x), element-wise.

    Bug fix: the original called the undefined name ``sigmoid`` (the sigmoid
    helper in this file is defined under a mangled name), raising NameError;
    the sigmoid is inlined here so the function is self-contained.
    """
    return SCREAMING_SNAKE_CASE_ * (1 / (1 + np.exp(-SCREAMING_SNAKE_CASE_ )))
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 68 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger.
# NOTE(review): mangled — the same name ``snake_case_`` is immediately
# rebound below, so the logger is discarded; upstream these are the distinct
# names ``logger`` and ``CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP``.
snake_case_ = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL for pretrained CamemBERT variants.
snake_case_ = {
    'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
    'umberto-commoncrawl-cased-v1': (
        'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
    ),
    'umberto-wikipedia-uncased-v1': (
        'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
    ),
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Configuration for CamemBERT models (a RoBERTa-style architecture).

    Stores the vocabulary/architecture hyper-parameters and forwards the
    special-token ids to the base config class.

    Bug fixes: the original ``__init__`` declared every parameter under the
    single name ``lowercase__`` (a SyntaxError) and bound each value to a
    throwaway local instead of an attribute on ``self`` (so the config stored
    nothing); parameter names were reconstructed from the right-hand-side
    names the mangled body still referenced — TODO confirm against the
    upstream CamembertConfig.
    """

    _A = "camembert"

    def __init__(
        self ,
        vocab_size=3_0522 ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=2 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-12 ,
        pad_token_id=1 ,
        bos_token_id=0 ,
        eos_token_id=2 ,
        position_embedding_type="absolute" ,
        use_cache=True ,
        classifier_dropout=None ,
        **kwargs ,
    ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """ONNX export configuration: declares the dynamic axes of each model
    input, with an extra "choice" axis for the multiple-choice task.
    """

    @property
    def __lowerCamelCase ( self ):
        """Return an OrderedDict mapping input name -> dynamic-axis labels.

        Bug fix: the mangled original bound the axis dict to a throwaway
        local and then read the undefined name ``dynamic_axis`` (NameError);
        the binding now matches the name the OrderedDict uses.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 68 | 1 |
'''simple docstring'''
# Sample adjacency-dict graphs for the SCC routines below.
# NOTE(review): mangled — both are bound to the same name ``snake_case_``, so
# the first graph is discarded; upstream these are two distinct test graphs.
snake_case_ = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
snake_case_ = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def __lowerCamelCase ( graph : dict , vert : int , visited : list ) -> list:
    """Post-order DFS from ``vert``: mark reachable vertices in ``visited``
    and return them, each vertex appearing after all of its descendants.

    Bug fixes: the original header declared all three parameters under the
    same mangled name (a SyntaxError), and the recursive call referenced the
    undefined name ``topology_sort`` — this function's actual (mangled) name
    is used for the recursion instead.
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += __lowerCamelCase(graph , neighbour , visited )
    order.append(vert )
    return order
def find_components(
    reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]
) -> list[int]:
    """Collect every not-yet-visited vertex reachable from *vert* in the
    reversed graph — i.e. one strongly connected component.

    *visited* is shared across calls and mutated in place.
    """
    visited[vert] = True
    component = [vert]

    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)

    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: compute the strongly connected components of
    *graph* (vertices must be 0..n-1).

    Pass 1 orders vertices by DFS finishing time; pass 2 runs DFS on the
    transpose graph in reverse finishing order, peeling one SCC per root.
    """
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    # Build the transpose (edge-reversed) graph.
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]  # reset for the second pass

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
| 68 |
'''simple docstring'''
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort *numbers* in place in ascending order and return it.

    Classic O(n^2) exchange sort: for each position i, swap in any later
    element that is smaller.  Fix: the original swap assigned both tuple
    targets to the same garbled name, so nothing was ever exchanged.

    >>> exchange_sort([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    """
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print it sorted.
    # NOTE(review): both inputs are bound to `snake_case_` but read back as
    # `user_input` / `unsorted` — this raises NameError as written; the
    # intended bindings were presumably those two names.
    snake_case_ = input('Enter numbers separated by a comma:\n').strip()
    snake_case_ = [int(item) for item in user_input.split(',')]
    print(exchange_sort(unsorted))
| 68 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    """One skip-list node: a key/value pair plus per-level forward links.

    Fixes: the two TypeVars were bound to a garbled name (leaving ``KT``/``VT``
    undefined), and the class name is restored to ``Node`` as used by the
    skip-list implementation below.
    """

    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # forward[i] is the next node at level i; len(forward) is this
        # node's level.
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of levels this node participates in."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    """Probabilistic sorted map with expected O(log n) search/insert/delete.

    Restored from the obfuscated original: class/method names are taken from
    their call sites (``SkipList()``, ``self.random_level()``,
    ``self._locate_node()``, ``insert``/``delete``/``find``), and several
    locals that were assigned under a garbled name but read under their real
    one (e.g. ``label_size``, ``node``/``update_vector``) are re-bound
    correctly.
    """

    def __init__(self, p: float = 0.5, max_level: int = 16):
        # Sentinel head node; it never stores a real key/value.
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p                  # probability of promoting a node one level
        self.max_level = max_level

    def __str__(self):
        """ASCII diagram: one labelled line per node plus link columns."""
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        """Yield keys in ascending order by walking level 0."""
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a level >= 1; each extra level occurs with probability p."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        """Return (node holding *key* or None, per-level left neighbours).

        The update vector lists, bottom-up, the rightmost node at each level
        whose key is still smaller than *key* — exactly the links insert and
        delete must rewire.
        """
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        """Remove *key* if present, splicing it out of every level."""
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        """Insert *key* with *value*; overwrite the value if *key* exists."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT):
        """Return the value stored under *key*, or None when absent."""
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None
def test_insert() -> None:
    """Inserted keys are all retrievable via a head-to-tail level-0 walk."""
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value() -> None:
    """Re-inserting a key replaces its value instead of duplicating the key."""
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()  # debug aid kept from the original

    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none() -> None:
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search() -> None:
    """find() returns the latest value for present keys, None otherwise."""
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing() -> None:
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method() -> None:
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key() -> None:
    """Deleting keys one by one never disturbs the remaining entries."""
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes() -> None:
    """After a delete, no forward pointer anywhere still reaches the node."""
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values() -> None:
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests() -> None:
    """Run the whole suite repeatedly: the structure is randomised, so one
    pass could miss level-dependent bugs."""
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main() -> None:
    """Small demo: build a list, delete a key, print the ASCII diagram."""
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 68 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)

# Root logger; the original bound it to a garbled name while the code below
# reads `logger` (e.g. logger.addHandler).
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list) -> None:
    """Write *articles* to *path*, newline-separated.

    Fix: the original declared two parameters with the same garbled name
    (a SyntaxError) and left the file handle unclosed; write_text both
    restores the intent and closes the file.
    """
    content = "\n".join(articles)
    Path(path).write_text(content)
# Tiny checkpoints used by the tests below; names restored from their read
# sites (`model == T5_TINY`, parameterized.expand([...]), addHandler).
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    # Fixes applied: all three test methods shared one obfuscated name (only
    # the last survived class creation), the helper's parameter was garbled
    # while the body read `model`, and the patch target was garbled while the
    # intent (patching sys.argv) is fixed by the testargs it installs.

    def run_eval_tester(self, model):
        """Run run_eval end-to-end for *model* on a one-line input file."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = F"\n            run_eval_search.py\n            {model}\n            {input_file_name}\n            {output_file_name}\n            --score_path {score_path}\n            --task {task}\n            --num_beams 2\n            --length_penalty 2.0\n        ".split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    # Quick smoke test on one model; the multi-model variants below are @slow.
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        """run_eval_search over a small en→de corpus, checking stdout report."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = F"\n            run_eval_search.py\n            {model}\n            {str(input_file_name)}\n            {str(output_file_name)}\n            --score_path {score_path}\n            --reference_path {reference_path}\n            --task {task}\n        ".split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 68 | 1 |
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Return the divisor d in [numerator, digit] for which numerator/d has
    the longest chain of distinct decimal remainders before one repeats
    (i.e. the longest recurring cycle, Project Euler problem 26 style).

    >>> solution(1, 10)
    7
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        # A set gives O(1) membership checks; the original used a list,
        # making each test O(cycle length).
        has_been_divided: set[int] = set()
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
                # The cycle is closed once a remainder repeats: no new
                # remainder can appear for this divisor, so stop early.
                break
            has_been_divided.add(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
    # Running the module directly executes the embedded doctests.
    import doctest

    doctest.testmod()
| 68 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def jacobi_iteration_method(coefficient_matrix, constant_matrix, init_val, iterations):
    """Approximate the solution of Ax = b with the Jacobi iterative method.

    Args:
        coefficient_matrix: n x n strictly diagonally dominant matrix A.
        constant_matrix:    n x 1 column vector b.
        init_val:           initial guess for x (length n).
        iterations:         number of Jacobi sweeps (>= 1).

    Returns:
        list[float]: the approximation of x after *iterations* sweeps.

    Raises:
        ValueError: on inconsistent shapes, a non-positive iteration count,
            or a coefficient matrix that is not strictly diagonally dominant.

    Fixes vs. the obfuscated original: the shape tuples were unpacked into a
    single repeated name and the row-count check compared a variable with
    itself (`rowsa != rowsa`), so none of the validation could work.
    """
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    # Augmented matrix [A | b]; the last column holds b.
    table = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]  # diagonal entry a_ii
                elif col == cols - 1:
                    val = table[row][col]  # right-hand side b_i
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table) -> bool:
    """Check that each diagonal entry of the augmented matrix exceeds the sum
    of the other coefficients in its row (the last, constant column is
    ignored).  Raises ValueError otherwise; returns True on success."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
    # Running the module directly executes the embedded doctests.
    import doctest

    doctest.testmod()
| 68 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both assignments bind the same name (`snake_case_`), so the
# logger is immediately overwritten by the archive map; the intended names
# were presumably `logger` and the pretrained-config archive map constant —
# verify against the rest of the module.
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
    'andreasmadsen/efficient_mlm_m0.40': (
        'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
    ),
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    # NOTE(review): attribute name is obfuscated; PretrainedConfig subclasses
    # conventionally call this `model_type` — verify before renaming.
    _A = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """RoBERTa-PreLayerNorm configuration.

        Fix: the original signature named every parameter ``lowercase__``
        (a SyntaxError); names are restored from the body's attribute
        assignments and the super().__init__ keyword arguments.
        """
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    # NOTE(review): method name is obfuscated; ONNX configs conventionally
    # expose this as the `inputs` property — verify against callers.
    @property
    def __lowerCamelCase(self):
        """Ordered mapping of ONNX input names to their dynamic axes.

        Fix: the original assigned the axis dict to a garbled name but
        returned `dynamic_axis`, raising NameError at runtime.
        """
        if self.task == "multiple-choice":
            # Multiple-choice inputs carry an extra per-example "choice" axis.
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 68 |
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    """Logic XNOR: return 1 when the two inputs are equal, else 0.

    Fix: the original declared both parameters with the same garbled name
    (a SyntaxError) and compared a variable with itself.
    """
    return 1 if input_1 == input_2 else 0
def test_xnor_gate() -> None:
    """Exercise the full XNOR truth table."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
    # Print the full XNOR truth table.
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 68 | 1 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    # Fix: all five test methods shared the single obfuscated name
    # `__lowerCamelCase`, so only the last one survived class creation and
    # unittest discovered none of them (no `test_` prefix). Garbled
    # `lowercase__` arguments are restored from grounded names in this file
    # (torch_device, Empty) and the explicit generation kwargs dicts.

    def test_text_streamer_matches_non_streaming(self):
        """TextStreamer must print exactly the greedy generation to stdout."""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1  # never stop early

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        """Iterating TextIteratorStreamer yields the full greedy text."""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        # generate() runs in a worker thread while we consume the stream.
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        """With skip_prompt=True only the newly generated suffix is printed."""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        """Decode kwargs (skip_special_tokens) are forwarded to the tokenizer."""
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        """A tiny timeout makes iteration raise queue.Empty."""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 68 |
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Hubble parameter H(z) in an FLRW universe.

    The curvature density is inferred so that all densities sum to one.
    Fix: the original declared five parameters with one repeated garbled
    name (a SyntaxError); names are restored from the body and the keyword
    call in the __main__ demo.

    Raises:
        ValueError: if any input is negative, or any relative density
            (radiation, matter, dark energy) exceeds one.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        # Closure: whatever the named densities leave over is curvature.
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    # NOTE(review): the density is bound to `snake_case_` but read back as
    # `matter_density` below — this raises NameError as written; the intended
    # binding was presumably `matter_density = 0.3`.
    snake_case_ = 0.3
    print(
        hubble_parameter(
            hubble_constant=6_8.3,
            radiation_density=1E-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 68 | 1 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

# Maps each --lr_scheduler CLI choice to the factory that builds the
# schedule; consumed by Seq2SeqTrainer._get_lr_scheduler below.  Both names
# were garbled to `snake_case_` in the obfuscated original while the class
# body reads `logger` and `arg_to_scheduler`.
arg_to_scheduler = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    'constant': get_constant_schedule,
    'constant_w_warmup': get_constant_schedule_with_warmup,
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__( self , lowercase__=None , lowercase__=None , *lowercase__ , **lowercase__ ):
"""simple docstring"""
super().__init__(*lowercase__ , **lowercase__ )
if config is None:
assert isinstance(self.model , lowercase__ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F" {self.model.__class__}"
)
SCREAMING_SNAKE_CASE_ : str = self.model.config
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = config
SCREAMING_SNAKE_CASE_ : Any = data_args
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.config.tgt_vocab_size if isinstance(self.config , lowercase__ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
" padding.." )
if self.args.label_smoothing == 0:
SCREAMING_SNAKE_CASE_ : Tuple = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
SCREAMING_SNAKE_CASE_ : List[Any] = label_smoothed_nll_loss
def __lowerCamelCase ( self , lowercase__ ):
"""simple docstring"""
if self.optimizer is None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["bias", "LayerNorm.weight"]
SCREAMING_SNAKE_CASE_ : Dict = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
SCREAMING_SNAKE_CASE_ : List[str] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
SCREAMING_SNAKE_CASE_ : Optional[int] = Adafactor
SCREAMING_SNAKE_CASE_ : List[Any] = {"scale_parameter": False, "relative_step": False}
else:
SCREAMING_SNAKE_CASE_ : int = AdamW
SCREAMING_SNAKE_CASE_ : Dict = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.args.learning_rate
if self.sharded_ddp:
SCREAMING_SNAKE_CASE_ : List[str] = OSS(
params=lowercase__ , optim=lowercase__ , **lowercase__ , )
else:
SCREAMING_SNAKE_CASE_ : int = optimizer_cls(lowercase__ , **lowercase__ )
if self.lr_scheduler is None:
SCREAMING_SNAKE_CASE_ : List[str] = self._get_lr_scheduler(lowercase__ )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def __lowerCamelCase ( self , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
SCREAMING_SNAKE_CASE_ : Tuple = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
SCREAMING_SNAKE_CASE_ : str = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
SCREAMING_SNAKE_CASE_ : Dict = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=lowercase__ )
return scheduler
def __lowerCamelCase ( self ):
"""simple docstring"""
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
SCREAMING_SNAKE_CASE_ : List[str] = model(**lowercase__ , use_cache=lowercase__ )[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[Any] = model(**lowercase__ , labels=lowercase__ , use_cache=lowercase__ )[:2]
else:
# compute label smoothed loss
SCREAMING_SNAKE_CASE_ : str = model(**lowercase__ , use_cache=lowercase__ )[0]
SCREAMING_SNAKE_CASE_ : List[str] = torch.nn.functional.log_softmax(lowercase__ , dim=-1 )
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = self.loss_fn(lowercase__ , lowercase__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def __lowerCamelCase ( self , lowercase__ , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = inputs.pop("labels" )
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Any = self._compute_loss(lowercase__ , lowercase__ , lowercase__ )
return loss
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self._prepare_inputs(lowercase__ )
SCREAMING_SNAKE_CASE_ : int = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
SCREAMING_SNAKE_CASE_ : str = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **lowercase__ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
SCREAMING_SNAKE_CASE_ : List[str] = self._pad_tensors_to_max_len(lowercase__ , gen_kwargs["max_length"] )
SCREAMING_SNAKE_CASE_ : Any = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : str = self._compute_loss(lowercase__ , lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
SCREAMING_SNAKE_CASE_ : Any = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
SCREAMING_SNAKE_CASE_ : Optional[int] = self._pad_tensors_to_max_len(lowercase__ , gen_kwargs["max_length"] )
return (loss, logits, labels)
def __lowerCamelCase ( self , lowercase__ , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
F" padded to `max_length`={max_length}" )
SCREAMING_SNAKE_CASE_ : Optional[int] = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
SCREAMING_SNAKE_CASE_ : List[str] = tensor
return padded_tensor
| 68 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Tests for `DisjunctiveConstraint`.

    NOTE(review): the original methods were all named `__lowerCamelCase`
    (shadowing each other and invisible to unittest discovery) and referenced
    an undefined `lowercase__` where `ValueError` / `list` / the local
    constraint set belonged; both are restored here.
    """

    def test_input_types(self):
        # token_ids must be a (possibly nested) plain python list of ints.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One constraint being a strict prefix of another is ambiguous and rejected.
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 68 | 1 |
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Return the Hubble parameter H(z) for an FLRW universe.

    NOTE(review): the original def gave all five parameters the same name
    (a SyntaxError) while the `__main__` block below called an undefined
    `hubble_parameter`; the signature is reconstructed from the keyword
    arguments used at that call site.

    Args:
        hubble_constant: H0, the Hubble constant today (km/s/Mpc).
        radiation_density: relative radiation density today (0..1).
        matter_density: relative matter density today (0..1).
        dark_energy: relative dark-energy density today (0..1).
        redshift: redshift z at which to evaluate H.

    Raises:
        ValueError: if any density/redshift is negative, or a relative
            density exceeds one.

    >>> hubble_parameter(hubble_constant=68.3, radiation_density=1e-4,
    ...     matter_density=-0.3, dark_energy=0.7, redshift=1)
    Traceback (most recent call last):
        ...
    ValueError: All input parameters must be positive
    >>> hubble_parameter(hubble_constant=68.3, radiation_density=1e-4,
    ...     matter_density=0.3, dark_energy=0.7, redshift=0)
    68.3
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")

    # Curvature closes the density budget to exactly 1.
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    # H(z) = H0 * sqrt(E(z))
    return hubble_constant * e_2 ** (1 / 2)


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 68 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Make torch ops deterministic so the hard-coded output slices below are reproducible.
enable_full_determinism()
class SCREAMING_SNAKE_CASE__(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Model tests for `VQModel`.

    NOTE(review): the original listed `_UpperCAmelCase` twice as a base
    (duplicate bases raise TypeError) while the actually imported mixins,
    `ModelTesterMixin` / `UNetTesterMixin`, went unused; methods/attributes
    are restored to the names the mixins and the bodies reference.
    """

    # Required by ModelTesterMixin: the class under test and its main input key.
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        """A minimal {"sample": image} batch on the test device."""
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        """Tiny VQModel config + matching inputs for the common mixin tests."""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        # Overridden as a no-op: not applicable to VQModel.
        pass

    def test_training(self):
        # Overridden as a no-op: not applicable to VQModel.
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        # Seed everything so the expected slice below is reproducible.
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 68 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    """Builds tiny configs and inputs for the Mask2Former model tests.

    NOTE(review): renamed from `SCREAMING_SNAKE_CASE__` because the test class
    below instantiates `MaskaFormerModelTester(self)`; method names are
    restored from their call sites.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        # mask feature size tracks the hidden dim in these tests
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        """Random pixel values/masks plus binary mask & class labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        """A deliberately tiny Mask2Former config for fast tests."""
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        # NOTE(review): the original flattened these attribute writes into
        # unnamed locals; the destinations below are reconstructed from the
        # upstream test — confirm against `Mask2FormerConfig`.
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for Mask2Former.

    NOTE(review): the original's bases were `_UpperCAmelCase` twice (a
    TypeError) while the imported `ModelTesterMixin`/`PipelineTesterMixin`
    went unused, its class attributes were all named `_A`, and its methods
    all `__lowerCamelCase` (shadowing each other, invisible to unittest
    discovery); names restored from their in-file references.
    """

    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    # NOTE(review): flag names reconstructed from the upstream test — confirm.
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        # only MaskaFormerForUniversalSegmentation has a loss to train on
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
# Shared absolute tolerance for the hard-coded expected slices below.
# NOTE(review): was bound to the throwaway name `snake_case_` while the tests
# read `TOLERANCE`.
TOLERANCE = 1e-4


def prepare_img():
    """Load the COCO cats fixture image used by the integration tests."""
    # NOTE(review): restored name — the tests below call `prepare_img()`.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the pretrained small COCO checkpoint."""

    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE)
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        # NOTE(review): `np.floataa` in the original is digit-corrupted
        # `np.float32`.
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 68 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Tests for the `transformers.utils.logging` verbosity helpers.

    NOTE(review): the original methods were all named `__lowerCamelCase`
    (shadowing each other, invisible to unittest discovery) and bound their
    locals to `SCREAMING_SNAKE_CASE_` while reading `logger`/`msg`/
    `level_origin`; names restored from how the bodies use them.
    """

    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            F"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def __lowerCamelCase ( ) -> Optional[int]:
    """Round-trip the progress-bar toggle: after disabling, the disabled flag
    must be observable; after re-enabling, it must be cleared again."""
    for toggle, expect_disabled in ((disable_progress_bar, True), (enable_progress_bar, False)):
        toggle()
        assert bool(are_progress_bars_disabled() ) is expect_disabled
| 68 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: maps each submodule name to the public symbols it
# defines. Fixes for this block: the original bound every collection to the
# same obfuscated name `snake_case_` (each optional-dependency branch
# overwrote the dict instead of registering its symbols), while `_LazyModule`
# below read the never-defined `_import_structure` — importing the package
# raised NameError. Submodule key names are recovered from the TYPE_CHECKING
# imports below.
_import_structure = {
    'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Slow (sentencepiece-backed) tokenizer.
    _import_structure['tokenization_llama'] = ['LlamaTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast (tokenizers-backed) tokenizer.
    _import_structure['tokenization_llama_fast'] = ['LlamaTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_llama'] = [
        'LlamaForCausalLM',
        'LlamaModel',
        'LlamaPreTrainedModel',
        'LlamaForSequenceClassification',
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so the heavy submodules are only
    # imported on first attribute access (standard transformers pattern; the
    # otherwise-unused `import sys` above grounds this reconstruction).
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 68 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.nn.Linear(2 , 4 )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 )
SCREAMING_SNAKE_CASE_ : Any = torch.optim.lr_scheduler.OneCycleLR(SCREAMING_SNAKE_CASE_ , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
SCREAMING_SNAKE_CASE_ : Dict = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
SCREAMING_SNAKE_CASE_ : Tuple = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[str] ) -> Tuple:
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Accelerator behaviour tests: device selection, GradientState
    bookkeeping, prepare()/free_memory() object tracking, checkpoint
    save/load (plain and with registered hooks), passthrough of non-prepared
    objects, and big-model (bitsandbytes 8-bit) integration paths.

    NOTE(review): locals are bound to ``SCREAMING_SNAKE_CASE_`` while later
    statements read names such as ``accelerator``, ``state``, ``model``,
    ``prepared_model`` that are never assigned, and the base class
    ``_UpperCAmelCase`` is not defined in this file — as written these tests
    raise NameError; confirm against the upstream source. Several kwargs look
    like mangled upstream names (``load_in_abit`` ~ ``load_in_8bit``,
    ``llm_inta_enable_fpaa_cpu_offload`` ~ ``llm_int8_enable_fp32_cpu_offload``).
    """
    @require_cuda
    def __lowerCamelCase ( self ):
        """A default Accelerator on CUDA records a non-CPU device in the shared
        PartialState; asking for a CPU Accelerator afterwards must raise."""
        SCREAMING_SNAKE_CASE_ : Tuple = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(lowercase__ ):
            SCREAMING_SNAKE_CASE_ : List[Any] = Accelerator(cpu=lowercase__ )
    def __lowerCamelCase ( self ):
        """GradientState mutators (num_steps, sync_gradients) round-trip and
        the shared state resets cleanly."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = Accelerator()
        SCREAMING_SNAKE_CASE_ : Any = GradientState()
        assert state.num_steps == 1
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4
        assert state.num_steps == 4
        assert state.sync_gradients is True
        SCREAMING_SNAKE_CASE_ : Optional[int] = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def __lowerCamelCase ( self ):
        """prepare() must register every returned object in the accelerator's
        internal trackers (_models/_optimizers/_schedulers/_dataloaders)."""
        SCREAMING_SNAKE_CASE_ : Dict = Accelerator()
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = create_components()
        (
            (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ),
        ) : Optional[Any] = accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
        self.assertTrue(prepared_model in accelerator._models )
        self.assertTrue(prepared_optimizer in accelerator._optimizers )
        self.assertTrue(prepared_scheduler in accelerator._schedulers )
        self.assertTrue(prepared_train_dl in accelerator._dataloaders )
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
    def __lowerCamelCase ( self ):
        """free_memory() must drop every tracked object."""
        SCREAMING_SNAKE_CASE_ : Tuple = Accelerator()
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : str = create_components()
        accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models ) == 0 )
        self.assertTrue(len(accelerator._optimizers ) == 0 )
        self.assertTrue(len(accelerator._schedulers ) == 0 )
        self.assertTrue(len(accelerator._dataloaders ) == 0 )
    def __lowerCamelCase ( self ):
        """ACCELERATE_TORCH_DEVICE must select the device even for an index
        that doesn't exist (torch.cuda.set_device is mocked out)."""
        PartialState._reset_state()
        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*lowercase__ , **lowercase__ ):
            pass
        with patch("torch.cuda.set_device" , lowercase__ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
            SCREAMING_SNAKE_CASE_ : List[str] = Accelerator()
            self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
    def __lowerCamelCase ( self ):
        """save_state/load_state must round-trip model weights: scrambled
        weights differ from the checkpoint, reloaded weights match it."""
        SCREAMING_SNAKE_CASE_ : Optional[Any] = Accelerator()
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = create_components()
        accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
        SCREAMING_SNAKE_CASE_ : Optional[int] = get_signature(lowercase__ )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(lowercase__ )
            # make sure random weights don't match
            load_random_weights(lowercase__ )
            self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) > 1e-3 )
            # make sure loaded weights match
            accelerator.load_state(lowercase__ )
            self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) < 1e-3 )
    def __lowerCamelCase ( self ):
        """Registered save/load hooks must fire during save_state/load_state
        (restoring extra JSON sidecar data) and stop firing once removed."""
        SCREAMING_SNAKE_CASE_ : Dict = Accelerator()
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = create_components()
        accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_signature(lowercase__ )
        # saving hook
        def save_config(lowercase__ , lowercase__ , lowercase__ ):
            SCREAMING_SNAKE_CASE_ : Optional[Any] = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(lowercase__ , "data.json" ) , "w" ) as f:
                json.dump(lowercase__ , lowercase__ )
        # loading hook
        def load_config(lowercase__ , lowercase__ ):
            with open(os.path.join(lowercase__ , "data.json" ) , "r" ) as f:
                SCREAMING_SNAKE_CASE_ : Any = json.load(lowercase__ )
            SCREAMING_SNAKE_CASE_ : List[str] = config["class_name"]
        SCREAMING_SNAKE_CASE_ : Dict = accelerator.register_save_state_pre_hook(lowercase__ )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = accelerator.register_load_state_pre_hook(lowercase__ )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(lowercase__ )
            # make sure random weights don't match with hooks
            load_random_weights(lowercase__ )
            self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) > 1e-3 )
            # random class name to verify correct one is loaded
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = "random"
            # make sure loaded weights match with hooks
            accelerator.load_state(lowercase__ )
            self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) < 1e-3 )
            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__ )
        # remove hooks
        save_hook.remove()
        load_hook.remove()
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(lowercase__ )
            # make sure random weights don't match with hooks removed
            load_random_weights(lowercase__ )
            self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) > 1e-3 )
            # random class name to verify correct one is loaded
            SCREAMING_SNAKE_CASE_ : Tuple = "random"
            # make sure loaded weights match with hooks removed
            accelerator.load_state(lowercase__ )
            self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) < 1e-3 )
            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__ )
    def __lowerCamelCase ( self ):
        """prepare() must pass a None argument through unchanged."""
        SCREAMING_SNAKE_CASE_ : Optional[int] = Accelerator()
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Tuple = create_components()
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
        # This should work
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = accelerator.prepare(
            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
        self.assertTrue(dummy_obj is None )
    def __lowerCamelCase ( self ):
        """prepare() must tag every prepared object (and pass-through objects)
        with the `_is_accelerate_prepared` marker."""
        SCREAMING_SNAKE_CASE_ : List[Any] = Accelerator()
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = create_components()
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1, 2, 3]
        # This should work
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(
            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
        self.assertEqual(
            getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
        self.assertEqual(
            getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
    @slow
    @require_bnb
    def __lowerCamelCase ( self ):
        """An 8-bit quantized model placed on a single GPU must be acceptable
        to prepare()."""
        from transformers import AutoModelForCausalLM
        SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_abit=lowercase__ , device_map={"": 0} , )
        SCREAMING_SNAKE_CASE_ : Optional[int] = Accelerator()
        # This should work
        SCREAMING_SNAKE_CASE_ : List[Any] = accelerator.prepare(lowercase__ )
    @slow
    @require_bnb
    def __lowerCamelCase ( self ):
        """An 8-bit model with CPU offload must be rejected by prepare()."""
        from transformers import AutoModelForCausalLM
        SCREAMING_SNAKE_CASE_ : Optional[Any] = Accelerator()
        with init_empty_weights():
            SCREAMING_SNAKE_CASE_ : Tuple = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        model.tie_weights()
        SCREAMING_SNAKE_CASE_ : Optional[Any] = infer_auto_device_map(lowercase__ )
        SCREAMING_SNAKE_CASE_ : List[Any] = "cpu"
        SCREAMING_SNAKE_CASE_ : Tuple = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , device_map=lowercase__ , load_in_abit=lowercase__ , llm_inta_enable_fpaa_cpu_offload=lowercase__ )
        # This should not work and get value error
        with self.assertRaises(lowercase__ ):
            SCREAMING_SNAKE_CASE_ : str = accelerator.prepare(lowercase__ )
    @slow
    @require_bnb
    @require_multi_gpu
    def __lowerCamelCase ( self ):
        """An 8-bit model sharded across several GPUs must be rejected by
        prepare() when distributed (MULTI_GPU) training is configured."""
        from transformers import AutoModelForCausalLM
        SCREAMING_SNAKE_CASE_ : str = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        model.tie_weights()
        SCREAMING_SNAKE_CASE_ : str = infer_auto_device_map(lowercase__ )
        SCREAMING_SNAKE_CASE_ : Dict = 1
        SCREAMING_SNAKE_CASE_ : str = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_abit=lowercase__ , device_map=lowercase__ , )
        SCREAMING_SNAKE_CASE_ : Any = Accelerator()
        # This should not work and get value error
        with self.assertRaises(lowercase__ ):
            SCREAMING_SNAKE_CASE_ : Tuple = accelerator.prepare(lowercase__ )
        PartialState._reset_state()
    @slow
    @require_bnb
    @require_multi_gpu
    def __lowerCamelCase ( self ):
        """An 8-bit model sharded across GPUs without distributed training
        configured must be acceptable to prepare()."""
        from transformers import AutoModelForCausalLM
        with init_empty_weights():
            SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = infer_auto_device_map(lowercase__ )
        SCREAMING_SNAKE_CASE_ : List[str] = 1
        SCREAMING_SNAKE_CASE_ : str = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_abit=lowercase__ , device_map=lowercase__ , )
        SCREAMING_SNAKE_CASE_ : Any = Accelerator()
        # This should work
        SCREAMING_SNAKE_CASE_ : Optional[int] = accelerator.prepare(lowercase__ )
    @require_cuda
    def __lowerCamelCase ( self ):
        """A CPU-forced Accelerator must still be able to prepare a model on a
        CUDA machine."""
        SCREAMING_SNAKE_CASE_ : Tuple = torch.nn.Linear(10 , 10 )
        SCREAMING_SNAKE_CASE_ : Optional[int] = torch.optim.SGD(model.parameters() , lr=0.01 )
        SCREAMING_SNAKE_CASE_ : Tuple = Accelerator(cpu=lowercase__ )
        SCREAMING_SNAKE_CASE_ : Dict = accelerator.prepare(lowercase__ )
| 68 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
snake_case_ = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str]=None ) -> List[Any]:
"""simple docstring"""
if rng is None:
SCREAMING_SNAKE_CASE_ : Any = random.Random()
SCREAMING_SNAKE_CASE_ : Any = 1
for dim in shape:
total_dims *= dim
SCREAMING_SNAKE_CASE_ : Any = []
for _ in range(SCREAMING_SNAKE_CASE_ ):
values.append(rng.randint(0 , vocab_size - 1 ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ).reshape(SCREAMING_SNAKE_CASE_ )
return output
def __lowerCamelCase ( shape , rng=None ):
    """Random 0/1 attention mask of the given ``shape``.

    Fixes: the original declared both parameters with the same name (a
    duplicate-argument SyntaxError) and returned ``attn_mask`` without ever
    binding it.

    NOTE(review): the forced-attention line is reconstructed from the comment
    and the return value; upstream sets the last column — confirm.
    """
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class SCREAMING_SNAKE_CASE__ :
    """Mixin of generation tests for Flax causal-LM models: greedy / sampled /
    beam generation, jit parity, `num_return_sequences`, logits-processor
    kwargs, attention-mask handling, and a PyTorch cross-check.

    NOTE(review): the two class attributes below are both named ``_A`` (the
    second overwrites the first; upstream these are ``model_tester`` and
    ``all_generative_model_classes``), and method bodies bind locals to
    ``SCREAMING_SNAKE_CASE_`` while reading never-assigned names such as
    ``config``, ``inputs``, ``input_ids``, ``model`` — as written these tests
    raise NameError; confirm against the upstream source.
    """
    # Upstream: model_tester (set by the concrete test class).
    _A = None
    # Upstream: all_generative_model_classes (tuple of Flax model classes).
    _A = ()
    def __lowerCamelCase ( self ):
        """Build a small (config, input_ids, attention_mask, max_length) fixture:
        batch capped at 2, sequence length halved, and at most 5 new tokens."""
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        SCREAMING_SNAKE_CASE_ : List[str] = 2
        SCREAMING_SNAKE_CASE_ : Dict = inputs["input_ids"].shape[-1] // 2
        SCREAMING_SNAKE_CASE_ : Tuple = inputs["input_ids"][:max_batch_size, :sequence_length]
        SCREAMING_SNAKE_CASE_ : Tuple = jnp.ones_like(lowercase__ )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        SCREAMING_SNAKE_CASE_ : Optional[Any] = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            SCREAMING_SNAKE_CASE_ : List[str] = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def __lowerCamelCase ( self ):
        """Greedy generation must produce identical sequences from the Flax model
        and its PyTorch counterpart loaded from the same weights."""
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : List[Any] = self._get_input_ids_and_config()
        SCREAMING_SNAKE_CASE_ : List[str] = False
        SCREAMING_SNAKE_CASE_ : List[str] = max_length
        SCREAMING_SNAKE_CASE_ : Optional[int] = 0
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ : Any = model_class(lowercase__ )
            SCREAMING_SNAKE_CASE_ : Tuple = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(lowercase__ , lowercase__ )
            SCREAMING_SNAKE_CASE_ : int = pt_model_class(lowercase__ ).eval()
            SCREAMING_SNAKE_CASE_ : List[str] = load_flax_weights_in_pytorch_model(lowercase__ , flax_model.params )
            SCREAMING_SNAKE_CASE_ : Tuple = flax_model.generate(lowercase__ ).sequences
            SCREAMING_SNAKE_CASE_ : Any = pt_model.generate(torch.tensor(lowercase__ , dtype=torch.long ) )
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                SCREAMING_SNAKE_CASE_ : str = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
    def __lowerCamelCase ( self ):
        """Greedy generation reaches max_length and matches its jitted version."""
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = self._get_input_ids_and_config()
        SCREAMING_SNAKE_CASE_ : int = False
        SCREAMING_SNAKE_CASE_ : Optional[int] = max_length
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ : List[Any] = model_class(lowercase__ )
            SCREAMING_SNAKE_CASE_ : List[str] = model.generate(lowercase__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
            SCREAMING_SNAKE_CASE_ : str = jit(model.generate )
            SCREAMING_SNAKE_CASE_ : List[str] = jit_generate(lowercase__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def __lowerCamelCase ( self ):
        """Sampled generation reaches max_length and matches its jitted version."""
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._get_input_ids_and_config()
        SCREAMING_SNAKE_CASE_ : str = True
        SCREAMING_SNAKE_CASE_ : Optional[int] = max_length
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ : Any = model_class(lowercase__ )
            SCREAMING_SNAKE_CASE_ : Optional[Any] = model.generate(lowercase__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
            SCREAMING_SNAKE_CASE_ : Optional[int] = jit(model.generate )
            SCREAMING_SNAKE_CASE_ : Optional[int] = jit_generate(lowercase__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def __lowerCamelCase ( self ):
        """Beam search (2 beams) reaches max_length and matches its jitted version."""
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[Any] = self._get_input_ids_and_config()
        SCREAMING_SNAKE_CASE_ : List[str] = False
        SCREAMING_SNAKE_CASE_ : Optional[int] = max_length
        SCREAMING_SNAKE_CASE_ : List[str] = 2
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(lowercase__ )
            SCREAMING_SNAKE_CASE_ : Any = model.generate(lowercase__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
            SCREAMING_SNAKE_CASE_ : int = jit(model.generate )
            SCREAMING_SNAKE_CASE_ : int = jit_generate(lowercase__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def __lowerCamelCase ( self ):
        """num_return_sequences must multiply the output batch dimension."""
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = self._get_input_ids_and_config()
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
        SCREAMING_SNAKE_CASE_ : List[str] = max_length
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2
        SCREAMING_SNAKE_CASE_ : Any = 2
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ : List[Any] = model_class(lowercase__ )
            SCREAMING_SNAKE_CASE_ : str = model.generate(lowercase__ ).sequences
            self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
    def __lowerCamelCase ( self ):
        """Sampling with temperature/top-k/top-p and forced bos/eos token
        processors still reaches max_length and matches the jitted version."""
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = self._get_input_ids_and_config()
        SCREAMING_SNAKE_CASE_ : List[str] = True
        SCREAMING_SNAKE_CASE_ : str = max_length
        SCREAMING_SNAKE_CASE_ : str = 0.8
        SCREAMING_SNAKE_CASE_ : List[Any] = 10
        SCREAMING_SNAKE_CASE_ : Dict = 0.3
        SCREAMING_SNAKE_CASE_ : Optional[int] = 1
        SCREAMING_SNAKE_CASE_ : Any = 8
        SCREAMING_SNAKE_CASE_ : Tuple = 9
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ : int = model_class(lowercase__ )
            SCREAMING_SNAKE_CASE_ : List[Any] = model.generate(lowercase__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = jit(model.generate )
            SCREAMING_SNAKE_CASE_ : Tuple = jit_generate(lowercase__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def __lowerCamelCase ( self ):
        """Greedy generation with forced bos/eos token processors reaches
        max_length and matches the jitted version."""
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : str = self._get_input_ids_and_config()
        SCREAMING_SNAKE_CASE_ : Any = max_length
        SCREAMING_SNAKE_CASE_ : List[Any] = 1
        SCREAMING_SNAKE_CASE_ : Any = 8
        SCREAMING_SNAKE_CASE_ : Tuple = 9
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ : Any = model_class(lowercase__ )
            SCREAMING_SNAKE_CASE_ : Optional[int] = model.generate(lowercase__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
            SCREAMING_SNAKE_CASE_ : str = jit(model.generate )
            SCREAMING_SNAKE_CASE_ : Dict = jit_generate(lowercase__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def __lowerCamelCase ( self ):
        """Beam search with forced bos/eos token processors reaches max_length
        and matches the jitted version."""
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[Any] = self._get_input_ids_and_config()
        SCREAMING_SNAKE_CASE_ : Dict = max_length
        SCREAMING_SNAKE_CASE_ : Optional[int] = 2
        SCREAMING_SNAKE_CASE_ : Tuple = 1
        SCREAMING_SNAKE_CASE_ : Tuple = 8
        SCREAMING_SNAKE_CASE_ : Optional[int] = 9
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ : List[Any] = model_class(lowercase__ )
            SCREAMING_SNAKE_CASE_ : Optional[int] = model.generate(lowercase__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
            SCREAMING_SNAKE_CASE_ : Dict = jit(model.generate )
            SCREAMING_SNAKE_CASE_ : Any = jit_generate(lowercase__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def __lowerCamelCase ( self ):
        """Greedy generation with a left-padded attention mask reaches
        max_length and matches the jitted version."""
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = self._get_input_ids_and_config()
        # pad attention mask on the left
        SCREAMING_SNAKE_CASE_ : str = attention_mask.at[(0, 0)].set(0 )
        SCREAMING_SNAKE_CASE_ : int = False
        SCREAMING_SNAKE_CASE_ : List[str] = max_length
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ : Tuple = model_class(lowercase__ )
            SCREAMING_SNAKE_CASE_ : Any = model.generate(lowercase__ , attention_mask=lowercase__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
            SCREAMING_SNAKE_CASE_ : Tuple = jit(model.generate )
            SCREAMING_SNAKE_CASE_ : str = jit_generate(lowercase__ , attention_mask=lowercase__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def __lowerCamelCase ( self ):
        """Sampled generation with a left-padded attention mask reaches
        max_length and matches the jitted version."""
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._get_input_ids_and_config()
        # pad attention mask on the left
        SCREAMING_SNAKE_CASE_ : List[str] = attention_mask.at[(0, 0)].set(0 )
        SCREAMING_SNAKE_CASE_ : str = True
        SCREAMING_SNAKE_CASE_ : List[str] = max_length
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ : List[Any] = model_class(lowercase__ )
            SCREAMING_SNAKE_CASE_ : Optional[Any] = model.generate(lowercase__ , attention_mask=lowercase__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
            SCREAMING_SNAKE_CASE_ : Optional[Any] = jit(model.generate )
            SCREAMING_SNAKE_CASE_ : Any = jit_generate(lowercase__ , attention_mask=lowercase__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def __lowerCamelCase ( self ):
        """Beam search with a left-padded attention mask reaches max_length and
        matches the jitted version."""
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[Any] = self._get_input_ids_and_config()
        # pad attention mask on the left
        SCREAMING_SNAKE_CASE_ : Optional[Any] = attention_mask.at[(0, 0)].set(0 )
        SCREAMING_SNAKE_CASE_ : Any = 2
        SCREAMING_SNAKE_CASE_ : Dict = max_length
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE_ : Tuple = model_class(lowercase__ )
            SCREAMING_SNAKE_CASE_ : Tuple = model.generate(lowercase__ , attention_mask=lowercase__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
            SCREAMING_SNAKE_CASE_ : Dict = jit(model.generate )
            SCREAMING_SNAKE_CASE_ : str = jit_generate(lowercase__ , attention_mask=lowercase__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Checks that Flax `generate()` rejects unknown keyword arguments with a
    message naming the offending argument."""
    def __lowerCamelCase ( self ):
        """Both a near-miss typo (`do_samples`) and an arbitrary unused kwarg
        (`foo`) must raise.

        NOTE(review): locals are bound to ``SCREAMING_SNAKE_CASE_`` while the
        code reads ``tokenizer``, ``model`` and ``lowercase__``, which are never
        assigned — as written this raises NameError; confirm against upstream.
        """
        SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = "Hello world"
        SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(lowercase__ , return_tensors="np" ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(lowercase__ , "do_samples" ):
            model.generate(lowercase__ , do_samples=lowercase__ )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(lowercase__ , "foo" ):
            SCREAMING_SNAKE_CASE_ : Any = {"foo": "bar"}
            model.generate(lowercase__ , **lowercase__ )
| 68 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger. NOTE(review): both bindings below use the same obfuscated
# name `snake_case_`, so the logger handle is immediately overwritten by the
# archive map (upstream these are `logger` and
# `XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP`).
snake_case_ = logging.get_logger(__name__)
# Released X-MOD checkpoints mapped to their hosted config.json files.
snake_case_ = {
    'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
    'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
    'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
    'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
    'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
    'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
    'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
    'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
    'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    """Configuration for X-MOD (XLM-R with per-language modular adapters).

    Fixes for this block: the original signature repeated the single name
    ``lowercase__`` for every parameter (a duplicate-argument SyntaxError),
    the base class ``_UpperCAmelCase`` was never defined (``PretrainedConfig``
    is imported above and otherwise unused), and the constructor bound every
    value to the throwaway ``SCREAMING_SNAKE_CASE_`` instead of setting
    attributes on ``self``. Parameter names are recovered from the right-hand
    sides of the original body's assignments; defaults keep the original
    values in their original positions.
    """
    # Model identifier consumed by the auto-mapping machinery.
    _A = "xmod"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        # Copy so callers' tuples/lists are not aliased.
        self.languages = list(languages)
        self.default_language = default_language
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    # ONNX-export configuration: declares the dynamic axes of the model inputs.

    @property
    def __lowerCamelCase ( self ):
        """Return the mapping of ONNX input names to their dynamic axes.

        Fix: the original bound the axis dict to a mangled local name but then
        read ``dynamic_axis``, which was undefined (NameError at runtime).
        Multiple-choice tasks carry an extra ``choice`` axis.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 68 | 1 |
'''simple docstring'''
from __future__ import annotations
# Sieve of Eratosthenes over [0, 1_000_000], consumed by the prime check below.
# Fix: the original bound the list and the counter to `snake_case_` while the
# loop read `seive` and `i`, so neither name was ever defined (NameError).
# NOTE(review): indices 0 and 1 are left True — callers never query them, but
# `seive[0]`/`seive[1]` would misreport primality.
seive = [True] * 1_0_0_0_0_0_1
i = 2
while i * i <= 1_0_0_0_0_0_0:
    if seive[i]:
        # i is prime: mark every multiple from i*i upward as composite.
        for j in range(i * i, 1_0_0_0_0_0_1, i):
            seive[j] = False
    i += 1
def is_prime ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
    """Return True if the argument is prime, per the precomputed sieve.

    Fixes: the body read an undefined name ``n`` instead of the parameter,
    and the function name is restored from its call site in the circular-prime
    search below.  The argument must lie within [0, 1_000_000].
    """
    return seive[SCREAMING_SNAKE_CASE_]
def contains_an_even_digit ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
    """Return True if the decimal representation contains an even digit.

    Fix: name restored from its call site in the circular-prime search below
    (the mangled ``__lowerCamelCase`` left that call unresolved).
    """
    return any(digit in "02468" for digit in str(SCREAMING_SNAKE_CASE_ ) )
def find_circular_primes ( SCREAMING_SNAKE_CASE_ : int = 1_0_0_0_0_0_0 ) -> list[int]:
    """Return all circular primes not exceeding the given limit (Project Euler 35).

    A circular prime stays prime under every rotation of its digits.  Any
    circular prime other than 2 can contain only odd digits, so even-digit
    candidates are skipped early.

    Fixes: the original called ``is_prime``/``contains_an_even_digit`` and
    bound ``str_num``/``list_nums`` through mangled names, none of which
    resolved; the logic is reconstructed self-contained with local helpers so
    the function no longer depends on module state.
    """

    def _is_prime(num: int) -> bool:
        # Trial division; rotations are at most 7 digits here, so this is fast.
        if num < 2:
            return False
        if num % 2 == 0:
            return num == 2
        factor = 3
        while factor * factor <= num:
            if num % factor == 0:
                return False
            factor += 2
        return True

    def _has_even_digit(num: int) -> bool:
        return any(digit in "02468" for digit in str(num))

    result = [2]  # 2 is the only circular prime with an even digit
    for num in range(3, SCREAMING_SNAKE_CASE_ + 1, 2):
        if _is_prime(num) and not _has_even_digit(num):
            str_num = str(num)
            rotations = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(_is_prime(rotation) for rotation in rotations):
                result.append(num)
    return result
def __lowerCamelCase ( ) -> int:
    """Return the number of circular primes below one million (Project Euler 35).

    Delegates to a module-level ``find_circular_primes``; it must be bound at
    call time for this wrapper to work.
    """
    return len(find_circular_primes() )
if __name__ == "__main__":
    # Entry point: report how many circular primes exist below one million.
    print(F'''{len(find_circular_primes()) = }''')
| 68 |
'''simple docstring'''
def or_gate ( input_a : int , input_b : int ) -> int:
    """Simulate a logical OR gate: return 1 when at least one input is 1, else 0.

    Fixes: the original declared both parameters with the same name (a
    SyntaxError) and counted ``input_a`` twice, ignoring the second input; the
    function name is restored from its call sites below.
    """
    return int((input_a, input_b).count(1 ) != 0 )
def __lowerCamelCase ( ) -> None:
    """Exhaustively check the module-level ``or_gate`` truth table."""
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
    # Demo: print the OR-gate output for each input combination.
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 68 | 1 |
'''simple docstring'''
def exchange_sort ( numbers : list[int] ) -> list[int]:
    """Sort ``numbers`` in place into ascending order and return it.

    Exchange sort: for each position i, compare with every later position j
    and swap whenever the later element is smaller.  O(n^2) comparisons.

    Fixes: the original overwrote the parameter with its own length and then
    read an undefined name ``numbers`` (NameError); the parameter/function
    names are restored from the body references and the __main__ call site.
    """
    length = len(numbers)
    for i in range(length):
        for j in range(i + 1, length):
            if numbers[j] < numbers[i]:
                # Swap the out-of-order pair.
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    # Interactive demo: read comma-separated integers and print them sorted.
    # NOTE(review): both assignments below bind the same name (`snake_case_`)
    # while `user_input`/`unsorted`/`exchange_sort` are read — an automated
    # renaming artifact; the original presumably bound `user_input` and
    # `unsorted`. Verify before running.
    snake_case_ = input('Enter numbers separated by a comma:\n').strip()
    snake_case_ = [int(item) for item in user_input.split(',')]
    print(exchange_sort(unsorted))
| 68 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
# Module-level logger for this configuration module.
snake_case_ = logging.get_logger(__name__)
# Map of pretrained model ids to their hosted config files.
# NOTE(review): this assignment rebinds the same name (`snake_case_`) as the
# logger above, so the logger is lost — an automated renaming artifact; these
# were presumably two distinct module-level names. Verify.
snake_case_ = {
    'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Configuration for a DPT (Dense Prediction Transformer) model.

    Fix: the original ``__init__`` declared every parameter with the same name
    (``lowercase__``), a SyntaxError.  Parameter names are restored from the
    right-hand sides of the body assignments, which the source preserved.
    The list defaults mirror the original interface and must not be mutated.
    """

    # Renamed from `_A`: the serializer below reads `self.__class__.model_type`.
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        """Store the hyper-parameters; in hybrid mode, resolve the BiT backbone config."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone." )
                # Default BiT backbone matching the DPT-hybrid checkpoints.
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone." )
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                pass  # already a config object
            else:
                raise ValueError(
                    F"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}." )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict ( self ):
        """Serialize this config to a plain dict, nesting the backbone config.

        Renamed from the mangled ``__lowerCamelCase``; the body's use of
        ``self.__class__.model_type`` matches the standard ``to_dict`` contract.
        """
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 68 | 1 |
'''simple docstring'''
from math import ceil
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = list(range(0 , SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ : int = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
SCREAMING_SNAKE_CASE_ : Dict = []
for i in device_map_blocks:
if device_map_blocks.count(SCREAMING_SNAKE_CASE_ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(SCREAMING_SNAKE_CASE_ )
# Missing blocks
SCREAMING_SNAKE_CASE_ : str = [i for i in blocks if i not in device_map_blocks]
SCREAMING_SNAKE_CASE_ : int = [i for i in device_map_blocks if i not in blocks]
if len(SCREAMING_SNAKE_CASE_ ) != 0:
raise ValueError(
"Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
" These attention blocks were specified more than once: " + str(SCREAMING_SNAKE_CASE_ ) )
if len(SCREAMING_SNAKE_CASE_ ) != 0:
raise ValueError(
"There are attention blocks for this model that are not specified in the device_map. Add these attention "
"blocks to a device on the device_map: " + str(SCREAMING_SNAKE_CASE_ ) )
if len(SCREAMING_SNAKE_CASE_ ) != 0:
raise ValueError(
"The device_map contains more attention blocks than this model has. Remove these from the device_map:"
+ str(SCREAMING_SNAKE_CASE_ ) )
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = list(range(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ : str = int(ceil(n_layers / len(SCREAMING_SNAKE_CASE_ ) ) )
SCREAMING_SNAKE_CASE_ : Any = [layers[i : i + n_blocks] for i in range(0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
return dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
| 68 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester ( unittest.TestCase ):
    """Helper that supplies ChineseCLIP image-processor kwargs and random images.

    Fixes: ``__init__`` and ``prepare_inputs`` declared duplicate parameter
    names (a SyntaxError); the nonexistent dtype ``np.uinta`` is corrected to
    ``np.uint8``.  The class and method names are restored from their call
    sites in the sibling test classes (``ChineseCLIPImageProcessingTester``,
    ``prepare_image_processor_dict``, ``prepare_inputs``).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        # List defaults mirror the original interface; they are never mutated.
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Return a batch of random images as PIL images, NumPy arrays or torch tensors.

        At most one of ``numpify``/``torchify`` may be set; with neither set,
        PIL images (channels-last) are returned.
        """
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size ):
                image_inputs.append(
                    np.random.randint(
                        255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
        else:
            image_inputs = []
            for i in range(self.batch_size ):
                # Random per-image resolution within [min_resolution, max_resolution).
                width, height = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
                image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x ) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase,unittest.TestCase ):
    """Tests for ChineseCLIPImageProcessor on standard 3-channel (RGB) inputs.

    NOTE(review): every test method below shares the name `__lowerCamelCase`
    (so only the last definition survives on the class) and several flag
    values are read through an unbound name `lowercase__` — an automated
    renaming artifact; confirm the intended method names and boolean flags
    against the upstream test suite before relying on this class.
    """

    # Image-processor class under test; None when the vision deps are missing.
    _A = ChineseCLIPImageProcessor if is_vision_available() else None

    def __lowerCamelCase ( self ):
        """Set-up: build the tester helper that supplies config dicts and images."""
        SCREAMING_SNAKE_CASE_ : List[Any] = ChineseCLIPImageProcessingTester(self , do_center_crop=lowercase__ )

    @property
    def __lowerCamelCase ( self ):
        """Image-processor kwargs produced by the tester helper."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def __lowerCamelCase ( self ):
        """The image processor exposes all expected configuration attributes."""
        SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowercase__ , "do_resize" ) )
        self.assertTrue(hasattr(lowercase__ , "size" ) )
        self.assertTrue(hasattr(lowercase__ , "do_center_crop" ) )
        self.assertTrue(hasattr(lowercase__ , "center_crop" ) )
        self.assertTrue(hasattr(lowercase__ , "do_normalize" ) )
        self.assertTrue(hasattr(lowercase__ , "image_mean" ) )
        self.assertTrue(hasattr(lowercase__ , "image_std" ) )
        self.assertTrue(hasattr(lowercase__ , "do_convert_rgb" ) )

    def __lowerCamelCase ( self ):
        """from_dict honors defaults and keyword overrides for size/crop_size."""
        SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        # Integer overrides are normalized into the dict forms below.
        SCREAMING_SNAKE_CASE_ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )

    def __lowerCamelCase ( self ):
        """Intentionally empty placeholder (inherited behavior not re-tested here)."""
        pass

    def __lowerCamelCase ( self ):
        """Processing PIL images yields correctly shaped pixel_values (single + batch)."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase__ )
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE_ : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(lowercase__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def __lowerCamelCase ( self ):
        """Processing NumPy arrays yields correctly shaped pixel_values (single + batch)."""
        SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase__ , numpify=lowercase__ )
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , np.ndarray )
        # Test not batched input
        SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ : List[str] = image_processing(lowercase__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def __lowerCamelCase ( self ):
        """Processing torch tensors yields correctly shaped pixel_values (single + batch)."""
        SCREAMING_SNAKE_CASE_ : str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE_ : List[str] = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase__ , torchify=lowercase__ )
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ : int = image_processing(lowercase__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase,unittest.TestCase ):
    """Tests for ChineseCLIPImageProcessor on 4-channel inputs (RGB conversion path).

    NOTE(review): as in the class above, all methods share the name
    `__lowerCamelCase` and boolean flags are read from the unbound name
    `lowercase__` — a renaming artifact; confirm against the upstream suite.
    """

    # Image-processor class under test; None when the vision deps are missing.
    _A = ChineseCLIPImageProcessor if is_vision_available() else None

    def __lowerCamelCase ( self ):
        """Set-up: tester with 4 input channels; 3 channels expected after RGB conversion."""
        SCREAMING_SNAKE_CASE_ : int = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=lowercase__ )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = 3

    @property
    def __lowerCamelCase ( self ):
        """Image-processor kwargs produced by the tester helper."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def __lowerCamelCase ( self ):
        """The image processor exposes all expected configuration attributes."""
        SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowercase__ , "do_resize" ) )
        self.assertTrue(hasattr(lowercase__ , "size" ) )
        self.assertTrue(hasattr(lowercase__ , "do_center_crop" ) )
        self.assertTrue(hasattr(lowercase__ , "center_crop" ) )
        self.assertTrue(hasattr(lowercase__ , "do_normalize" ) )
        self.assertTrue(hasattr(lowercase__ , "image_mean" ) )
        self.assertTrue(hasattr(lowercase__ , "image_std" ) )
        self.assertTrue(hasattr(lowercase__ , "do_convert_rgb" ) )

    def __lowerCamelCase ( self ):
        """Intentionally empty placeholder (inherited behavior not re-tested here)."""
        pass

    def __lowerCamelCase ( self ):
        """4-channel PIL inputs are converted to the expected 3-channel pixel_values."""
        SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase__ )
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ : List[str] = image_processing(lowercase__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 68 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Processor that wraps a ViLT image processor and a BERT tokenizer.

    Fixes: the three class attributes were all named ``_A`` (only the last
    survived) and ``__init__`` declared duplicate parameter names (a
    SyntaxError).  Locals/attribute names are restored from the visible
    keyword uses and the deprecation-warning strings.
    NOTE(review): the attribute names follow the ``ProcessorMixin`` convention
    (``attributes``/``image_processor_class``/``tokenizer_class``) — verify
    against the base-class contract.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Accept the deprecated `feature_extractor` kwarg as a fallback for `image_processor`."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." ,
                FutureWarning ,  # NOTE(review): category restored from convention — the original argument was mangled
            )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor, tokenizer)
        # NOTE(review): attribute name presumed `current_processor` per upstream — verify.
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text = None,
        add_special_tokens = True,
        padding = False,
        truncation = None,
        max_length = None,
        stride = 0,
        pad_to_multiple_of = None,
        return_token_type_ids = None,
        return_attention_mask = None,
        return_overflowing_tokens = False,
        return_special_tokens_mask = False,
        return_offsets_mapping = False,
        return_length = False,
        verbose = True,
        return_tensors = None,
        **kwargs,
    ):
        """Tokenize `text` and encode `images`, merging both into one encoding."""
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, order-preserving."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class` (name taken from the warning text)."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." ,
            FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for `image_processor` (name taken from the warning text)."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." ,
            FutureWarning , )
        return self.image_processor
| 68 |
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
    """Return True if the number uses each digit 1-9 exactly once.

    Fix: function name restored from its call sites in the search below
    (the mangled ``__lowerCamelCase`` left those calls unresolved).
    """
    digits = str(SCREAMING_SNAKE_CASE_ )
    return len(digits ) == 9 and set(digits ) == set("123456789" )
def solution ( ) -> int | None:
    """Return the largest 1-9 pandigital concatenated product (Project Euler 38).

    Candidates are of the form n * 100002 (n concatenated with 2n, for 4-digit
    n) or n * 1002003 (n, 2n, 3n for 3-digit n), searched in descending order
    so the first hit is the maximum.

    Fixes: ``candidate`` was bound through a mangled name and then read
    (NameError), and the pandigital check referenced an unresolved helper;
    the check is inlined as a local function so this block is self-contained.
    The function name is restored from the __main__ call site.
    """

    def _is_9_pandigital(num: int) -> bool:
        digits = str(num)
        return len(digits) == 9 and set(digits) == set("123456789")

    for base_num in range(9_9_9_9, 4_9_9_9, -1):
        candidate = 1_0_0_0_0_2 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    for base_num in range(3_3_3, 9_9, -1):
        candidate = 1_0_0_2_0_0_3 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
    # Entry point: print the largest 1-9 pandigital concatenated product.
    print(F'''{solution() = }''')
| 68 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase,unittest.TestCase ):
_A = BertTokenizer
_A = BertTokenizerFast
_A = True
_A = True
_A = filter_non_english
def __lowerCamelCase ( self ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ : str = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __lowerCamelCase ( self , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = "UNwant\u00E9d,running"
SCREAMING_SNAKE_CASE_ : str = "unwanted, running"
return input_text, output_text
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowercase__ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , [9, 6, 7, 12, 10, 11] )
def __lowerCamelCase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : List[str] = "UNwant\u00E9d,running"
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.tokenize(lowercase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = rust_tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = rust_tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.encode(lowercase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = rust_tokenizer.encode(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# With lower casing
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer(do_lower_case=lowercase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_rust_tokenizer(do_lower_case=lowercase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = "UNwant\u00E9d,running"
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.tokenize(lowercase__ )
SCREAMING_SNAKE_CASE_ : Any = rust_tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = rust_tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE_ : int = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.encode(lowercase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = rust_tokenizer.encode(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = BasicTokenizer(do_lower_case=lowercase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase__ , strip_accents=lowercase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = BasicTokenizer(do_lower_case=lowercase__ , strip_accents=lowercase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = BasicTokenizer(do_lower_case=lowercase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = BasicTokenizer(do_lower_case=lowercase__ , strip_accents=lowercase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BasicTokenizer(do_lower_case=lowercase__ , strip_accents=lowercase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase__ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = BasicTokenizer()
SCREAMING_SNAKE_CASE_ : Optional[Any] = "a\n'll !!to?'d of, can't."
SCREAMING_SNAKE_CASE_ : List[Any] = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
self.assertListEqual(tokenizer.tokenize(lowercase__ ) , lowercase__ )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
SCREAMING_SNAKE_CASE_ : Dict = {}
for i, token in enumerate(lowercase__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = i
SCREAMING_SNAKE_CASE_ : str = WordpieceTokenizer(vocab=lowercase__ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __lowerCamelCase ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __lowerCamelCase ( self ):
"""simple docstring"""
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __lowerCamelCase ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowercase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowercase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer_class.from_pretrained("bert-base-uncased" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.encode("sequence builders" , add_special_tokens=lowercase__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer.encode("multi-sequence build" , add_special_tokens=lowercase__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.build_inputs_with_special_tokens(lowercase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __lowerCamelCase ( self ):
    """Check character offset mapping around accented words and the mask token.

    Encodes "A, naïve <mask> AllenNLP sentence." with
    ``return_offsets_mapping=True`` and verifies both the produced tokens and
    their (start, end) character offsets, selecting the cased or uncased
    expectation depending on the checkpoint's ``do_lower_case`` flag.
    """
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
            SCREAMING_SNAKE_CASE_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
            SCREAMING_SNAKE_CASE_ : Tuple = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
            SCREAMING_SNAKE_CASE_ : str = tokenizer_r.encode_plus(
                lowercase__ , return_attention_mask=lowercase__ , return_token_type_ids=lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ , )
            # The lower-casing behaviour of the checkpoint decides which of the
            # two expected token/offset tables applies below.
            SCREAMING_SNAKE_CASE_ : int = tokenizer_r.do_lower_case if hasattr(lowercase__ , "do_lower_case" ) else False
            SCREAMING_SNAKE_CASE_ : Tuple = (
                [
                    ((0, 0), tokenizer_r.cls_token),
                    ((0, 1), "A"),
                    ((1, 2), ","),
                    ((3, 5), "na"),
                    ((5, 6), "##ï"),
                    ((6, 8), "##ve"),
                    ((9, 15), tokenizer_r.mask_token),
                    ((16, 21), "Allen"),
                    ((21, 23), "##NL"),
                    ((23, 24), "##P"),
                    ((25, 33), "sentence"),
                    ((33, 34), "."),
                    ((0, 0), tokenizer_r.sep_token),
                ]
                if not do_lower_case
                else [
                    ((0, 0), tokenizer_r.cls_token),
                    ((0, 1), "a"),
                    ((1, 2), ","),
                    ((3, 8), "naive"),
                    ((9, 15), tokenizer_r.mask_token),
                    ((16, 21), "allen"),
                    ((21, 23), "##nl"),
                    ((23, 24), "##p"),
                    ((25, 33), "sentence"),
                    ((33, 34), "."),
                    ((0, 0), tokenizer_r.sep_token),
                ]
            )
            self.assertEqual(
                [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
            self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __lowerCamelCase ( self ):
    """Verify ``tokenize_chinese_chars`` handling for slow vs fast tokenizers.

    With the option enabled each CJK character becomes its own token without
    the "##" continuation prefix; with it disabled every character after the
    first is expected to carry "##". Slow and fast tokenizers must agree in
    both configurations.
    """
    SCREAMING_SNAKE_CASE_ : List[str] = ["的", "人", "有"]
    SCREAMING_SNAKE_CASE_ : Optional[int] = "".join(lowercase__ )
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
            SCREAMING_SNAKE_CASE_ : Optional[Any] = True
            SCREAMING_SNAKE_CASE_ : int = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
            SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
            SCREAMING_SNAKE_CASE_ : Any = tokenizer_p.encode(lowercase__ , add_special_tokens=lowercase__ )
            SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_r.encode(lowercase__ , add_special_tokens=lowercase__ )
            SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer_r.convert_ids_to_tokens(lowercase__ )
            SCREAMING_SNAKE_CASE_ : List[str] = tokenizer_p.convert_ids_to_tokens(lowercase__ )
            # it is expected that each Chinese character is not preceded by "##"
            self.assertListEqual(lowercase__ , lowercase__ )
            self.assertListEqual(lowercase__ , lowercase__ )
            SCREAMING_SNAKE_CASE_ : Optional[Any] = False
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
            SCREAMING_SNAKE_CASE_ : List[str] = tokenizer_r.encode(lowercase__ , add_special_tokens=lowercase__ )
            SCREAMING_SNAKE_CASE_ : Dict = tokenizer_p.encode(lowercase__ , add_special_tokens=lowercase__ )
            SCREAMING_SNAKE_CASE_ : Dict = tokenizer_r.convert_ids_to_tokens(lowercase__ )
            SCREAMING_SNAKE_CASE_ : Any = tokenizer_p.convert_ids_to_tokens(lowercase__ )
            # it is expected that only the first Chinese character is not preceded by "##".
            SCREAMING_SNAKE_CASE_ : Optional[int] = [
                F"##{token}" if idx != 0 else token for idx, token in enumerate(lowercase__ )
            ]
            self.assertListEqual(lowercase__ , lowercase__ )
            self.assertListEqual(lowercase__ , lowercase__ )
| 68 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Dataset reader backed by a ``pyspark.sql.DataFrame``.

    Wraps the :class:`Spark` dataset builder so a Spark DataFrame can be
    streamed or fully materialized as a Hugging Face dataset.
    """

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        """Store reader options and create the underlying Spark builder.

        Args:
            df: source Spark DataFrame.
            split: dataset split to expose.
            features: optional explicit feature schema.
            streaming: if True, `read` returns a streaming dataset.
            cache_dir: where prepared data is cached.
            keep_in_memory: keep the prepared dataset in memory.
            working_dir: scratch directory used by the Spark builder.
            load_from_cache_file: if False, force a re-download/re-prepare.
            file_format: on-disk format used when preparing ("arrow", ...).
        """
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        # Options consumed by `read` (not by the parent reader).
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Return the dataset, either streaming or fully downloaded/prepared."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split )
| 68 | 1 |
"""Solutions to the classic rod-cutting problem (CLRS, dynamic programming).

Given a rod of length ``n`` and a price table where ``prices[i - 1]`` is the
price of a piece of length ``i``, compute the maximum revenue obtainable by
cutting the rod into integer-length pieces.
"""


def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time recursive solution (no memoization).

    Args:
        n: length of the rod.
        prices: prices[i - 1] is the price of a piece of length i.

    Returns:
        The maximum obtainable revenue.

    Raises:
        ValueError: if n is negative or prices is shorter than n.
    """
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for length in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[length - 1] + naive_cut_rod_recursive(n - length, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Top-down dynamic programming (memoized recursion), O(n^2) time."""
    _enforce_args(n, prices)
    # max_rev[i] caches the best revenue for a rod of length i; -inf == unsolved.
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    """Helper for `top_down_cut_rod`; `max_rev` memoizes sub-problem answers."""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for length in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[length - 1] + _top_down_cut_rod_recursive(n - length, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Bottom-up dynamic programming, O(n^2) time and O(n) extra space."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    """Raise ValueError if n is negative or prices has fewer than n entries."""
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )


def main():
    """Sanity-check that all three implementations agree on a known instance."""
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
| 68 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
# Docstring fragments consumed by `add_start_docstrings` and `_info` below;
# they must be named _DESCRIPTION / _KWARGS_DESCRIPTION / _CITATION.
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    """Exact-match metric: the percentage of predictions equal to their reference."""

    def _info(self):
        """Declare the metric metadata and the expected input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return ``{"exact_match": rate}`` with rate in [0.0, 100.0].

        All normalization options are applied to both predictions and
        references before comparison; regexes are stripped first.
        """
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                # Rebind so each pattern removal actually takes effect.
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("" , "" , string.punctuation )
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("" , "" , string.digits )
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 68 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Configuration class for an OpenAI GPT model.

    Stores the hyper-parameters used to instantiate the model; defaults match
    the original ``openai-gpt`` checkpoint.
    """

    model_type = "openai-gpt"
    # Map canonical config attribute names onto this model's historic names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=4_0478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        """Store the hyper-parameters and forward unknown kwargs to the base config.

        Args:
            vocab_size: size of the BPE vocabulary.
            n_positions: maximum sequence length the model can handle.
            n_embd: dimensionality of embeddings and hidden states.
            n_layer: number of transformer blocks.
            n_head: attention heads per block.
            afn: activation function name.
            resid_pdrop / embd_pdrop / attn_pdrop: dropout probabilities.
            layer_norm_epsilon: epsilon used by LayerNorm layers.
            initializer_range: stddev used to initialize weights.
            summary_*: options configuring the sequence-summary head.
        """
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 68 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

# Maximum input length (in tokens) for each released checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """SentencePiece-based tokenizer for the GPT-SW3 models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        """Load the SentencePiece model and configure special tokens.

        Args:
            vocab_file: path to the SentencePiece ``spiece.model`` file.
            do_lower_case / remove_space / keep_accents: preprocessing flags
                forwarded to the base tokenizer.
            pad_token / unk_token / eos_token / bos_token: special tokens;
                ``None`` selects per-checkpoint defaults (see below).
            sp_model_kwargs: extra kwargs for ``SentencePieceProcessor``.
        """
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path" )
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored" )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )

        # Used for whitespace normalization in input texts
        # fmt : off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt : on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            F"[{''.join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]" )

    def __getstate__(self):
        """Drop the (unpicklable) SentencePiece processor when pickling."""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """Restore state and re-load the SentencePiece model from disk."""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        """Number of pieces in the SentencePiece model."""
        return len(self.sp_model )

    def preprocess_text(self, text: str) -> str:
        """Remove non-printing characters, normalize whitespace, apply NFC."""
        text = self.non_printing_characters_re.sub("" , text )
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC" , text )
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        """Tokenize `text` into SentencePiece pieces after preprocessing."""
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index )

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Return the string unchanged; no extra clean-up is performed."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join a token sequence back into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        """Return the token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)

    def encode_fast(self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False):
        """Encode text straight through SentencePiece (no special tokens).

        Faster than ``__call__`` because it skips the framework machinery.
        """
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decode ids straight through SentencePiece."""
        return self.sp_model.decode(token_ids )

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Build GPT-SW3 chat-format input ids from a ``Conversation``."""
        all_responses = [F"User: {text}" if is_user else F"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            F"{self.eos_token}{self.bos_token}" + F"{self.bos_token}".join(all_responses ) + F"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt )
| 68 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    # SentencePiece is optional; without it there is no slow tokenizer class.
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length (in tokens) for each released checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Fast (HuggingFace *tokenizers*-backed) tokenizer for BARThez."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        """Build the fast tokenizer from a sentencepiece model / tokenizer.json.

        Args:
            vocab_file: path to the slow tokenizer's sentencepiece model
                (needed to save a slow tokenizer back).
            tokenizer_file: path to a serialized ``tokenizer.json``.
            bos/eos/sep/cls/unk/pad/mask_token: special tokens.
        """
        # The mask token behaves like a normal word that strips the space
        # before it (lstrip=True).
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # Saving a slow tokenizer requires the original sentencepiece model.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Wrap sequences with special tokens: <s> A </s> or <s> A </s></s> B </s>."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BARThez does not use token type ids; return a list of zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the slow-tokenizer sentencepiece model into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 68 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
snake_case_ = True
except (ImportError, ModuleNotFoundError):
snake_case_ = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str ) -> str:
    """Re-split the input into one sentence per line using nltk.

    The pegasus newline marker "<n>" is stripped first. Requires nltk to be
    installed (asserted via the module-level NLTK_AVAILABLE flag).
    """
    # `re.sub` returns a new string — bind the result (it was previously
    # discarded, leaving "<n>" markers in the text handed to nltk).
    SCREAMING_SNAKE_CASE_ = re.sub("<n>" , "" , SCREAMING_SNAKE_CASE_ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE_ ) )
| 68 | 1 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def __lowerCamelCase ( pkg , hint=None ) -> None:
    """Check that the installed version of `pkg` satisfies the pin in `deps`.

    Args:
        pkg: package name; must be a key of the `deps` table.
        hint: optional extra text appended to the error message on failure.
    """
    require_version(deps[pkg] , hint )
| 68 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__(
    self,
    parent,
    batch_size=2,
    num_channels=3,
    image_size=4,
    patch_size=2,
    text_seq_length=7,
    is_training=True,
    use_input_mask=True,
    use_token_type_ids=True,
    use_labels=True,
    vocab_size=99,
    hidden_size=36,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
    hidden_act="gelu",
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=512,
    type_vocab_size=16,
    type_sequence_label_size=2,
    initializer_range=0.02,
    coordinate_size=6,
    shape_size=6,
    num_labels=3,
    num_choices=4,
    scope=None,
    range_bbox=1000,
):
    """Store the (tiny) model/test hyper-parameters on the tester instance.

    The sibling `prepare_config_and_inputs` reads these attributes to build
    a small LayoutLMv3 config plus random inputs.
    """
    self.parent = parent
    self.batch_size = batch_size
    self.num_channels = num_channels
    self.image_size = image_size
    self.patch_size = patch_size
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.coordinate_size = coordinate_size
    self.shape_size = shape_size
    self.num_labels = num_labels
    self.num_choices = num_choices
    self.scope = scope
    self.range_bbox = range_bbox
    # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
    self.text_seq_length = text_seq_length
    self.image_seq_length = (image_size // patch_size) ** 2 + 1
    self.seq_length = self.text_seq_length + self.image_seq_length
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
SCREAMING_SNAKE_CASE_ : Dict = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 3]
SCREAMING_SNAKE_CASE_ : str = bbox[i, j, 1]
SCREAMING_SNAKE_CASE_ : Dict = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE_ : List[Any] = bbox[i, j, 2]
SCREAMING_SNAKE_CASE_ : Dict = bbox[i, j, 0]
SCREAMING_SNAKE_CASE_ : Tuple = tmp_coordinate
SCREAMING_SNAKE_CASE_ : Dict = tf.constant(lowercase__ )
SCREAMING_SNAKE_CASE_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ : str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFLayoutLMvaModel(config=lowercase__ )
# text + image
SCREAMING_SNAKE_CASE_ : int = model(lowercase__ , pixel_values=lowercase__ , training=lowercase__ )
SCREAMING_SNAKE_CASE_ : str = model(
lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , training=lowercase__ , )
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , training=lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE_ : Tuple = model(lowercase__ , training=lowercase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE_ : int = model({"pixel_values": pixel_values} , training=lowercase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFLayoutLMvaForSequenceClassification(config=lowercase__ )
SCREAMING_SNAKE_CASE_ : List[str] = model(
lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , training=lowercase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFLayoutLMvaForTokenClassification(config=lowercase__ )
SCREAMING_SNAKE_CASE_ : int = model(
lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , training=lowercase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = 2
SCREAMING_SNAKE_CASE_ : List[Any] = TFLayoutLMvaForQuestionAnswering(config=lowercase__ )
SCREAMING_SNAKE_CASE_ : int = model(
lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , training=lowercase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_)) : Any = config_and_inputs
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the TF LayoutLMv3 architectures.

    NOTE(review): the original declared the same placeholder base class twice
    (TypeError: duplicate base class) and named every method `__lowerCamelCase`,
    so later defs silently overwrote earlier ones and the call sites
    (`self.model_tester.prepare_config_and_inputs_for_common`, ...) pointed at
    nothing. Method and attribute names are restored from those call sites.
    `TFModelTesterMixin`/`PipelineTesterMixin` are the conventional mixins for
    this suite — confirm against the file's imports.
    """

    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # LayoutLMv3 requires a dedicated processor, so the generic pipeline tests are skipped.
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Return a deep copy of `inputs_dict`, expanded for multiple choice and
        augmented with dummy labels when `return_labels` is set.

        NOTE(review): the TF_MODEL_FOR_* mapping constants are the conventional
        ones for this check — verify against the file's imports.
        """
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        """For every model with a loss head, check that the loss is computed
        consistently from kwargs, masked labels, a dict input and a tuple input."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        # -100 is the conventional ignore index for masked label positions.
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO sample image used by the integration test below.

    NOTE(review): renamed from the placeholder `__lowerCamelCase` — the
    integration test calls `prepare_img()`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    """Slow integration test: run the released checkpoint and compare against golden values.

    NOTE(review): method names restored — the original named both methods
    `__lowerCamelCase`, so the property read `self.default_image_processor`
    resolved to nothing and the second def clobbered the first.
    """

    @cached_property
    def default_image_processor(self):
        # OCR is disabled because the test supplies its own input_ids/bbox.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 68 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Module logger; the conversion functions below read `logger`.
logger = logging.get_logger(__name__)

# fairseq parameter-name prefix -> HF UniSpeech attribute path.
# '*' in a value is replaced with the encoder layer index at load time.
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'ctc_proj',
    'mask_emb': 'masked_spec_embed',
}
# HF attributes that live at the top level (not under the `unispeech.` prefix).
TOP_LEVEL_KEYS = [
    'ctc_proj',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """Copy one fairseq tensor onto the HF sub-module reached by walking `key`.

    Args:
        hf_pointer: root HF module to descend into (one attribute per dotted segment).
        key: dotted attribute path, already mapped to HF naming.
        value: tensor to copy in (shape must match the target).
        full_name: original fairseq parameter name, used for logging.
        weight_type: one of "weight", "weight_g", "weight_v", "bias" or None.
        is_finetuned: True for fine-tuned (CTC) checkpoints.

    NOTE(review): name and parameter order restored from the call in
    `recursively_load_weights`; the original declared six identically-named
    parameters (SyntaxError).
    """
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Walk the fairseq state dict and copy every tensor into `hf_model`.

    Conv feature-extractor tensors go through `load_conv_layer`; everything else
    is renamed via MAPPING and written with `set_recursively`. Parameters that
    match nothing are collected and reported at the end.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Layer index sits just before the matched key in the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature extractor.

    The fairseq name encodes `<layer_id>.<type_id>.<param>`: type 0 is the conv
    itself, type 2 is its layer norm (only layer 0 when group norm is used).
    Anything else is recorded in `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq UniSpeech checkpoint to the HF format and save it.

    Args:
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output directory for the converted model (and
            tokenizer/processor when fine-tuned).
        config_path: optional HF config.json to use instead of the defaults.
        dict_path: fairseq dictionary (fine-tuned checkpoints only).
        is_finetuned: True -> CTC model + tokenizer; False -> pretraining model.

    NOTE(review): parameter order restored from the positional call in the
    `__main__` block below; the original signature repeated one placeholder
    name (SyntaxError).
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=1_6_0_0_0,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 68 |
'''simple docstring'''
def nth_permutation(n, k):
    """Return the k-th (0-indexed, lexicographic) permutation of [0, 1, ..., n-1].

    Decomposes k in the factorial number system: successive divmods by the
    factorials pick the next element out of the remaining pool.

    Args:
        n: size of the permuted range (n >= 1).
        k: permutation index, 0 <= k < n!.

    Raises:
        AssertionError: if k is out of bounds.

    >>> nth_permutation(3, 0)
    [0, 1, 2]
    >>> nth_permutation(3, 3)
    [1, 2, 0]
    >>> nth_permutation(3, 5)
    [2, 1, 0]
    """
    # Edge case: the original code raised IndexError for n == 1.
    if n == 1:
        assert k == 0, "k out of bounds"
        return [0]

    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation: peel off the largest remaining factorial each step.
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation
# Run this module's doctests when executed directly (no-op on import).
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 68 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    """Slow integration test for the TF XLM-RoBERTa base checkpoint.

    NOTE(review): class/method renamed from placeholders; `tf.intaa`/`tf.floataa`
    (not TensorFlow attributes) fixed to `tf.int32`/`tf.float32`.
    """

    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 68 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # CLI entry point: parse arguments, download/convert the ControlNet
    # checkpoint, and save the result.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    parser.add_argument(
        '--original_config_file',
        type=str,
        required=True,
        help='The YAML config file corresponding to the original architecture.',
    )
    parser.add_argument(
        '--num_in_channels',
        default=None,
        type=int,
        help='The number of input channels. If `None` number of input channels will be automatically inferred.',
    )
    parser.add_argument(
        '--image_size',
        default=5_1_2,
        type=int,
        help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
            ' Base. Use 768 for Stable Diffusion v2.'
        ),
    )
    parser.add_argument(
        '--extract_ema',
        action='store_true',
        help=(
            'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
            ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
            ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
        ),
    )
    parser.add_argument(
        '--upcast_attention',
        action='store_true',
        help=(
            'Whether the attention computation should always be upcasted. This is necessary when running stable'
            ' diffusion 2.1.'
        ),
    )
    parser.add_argument(
        '--from_safetensors',
        action='store_true',
        help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
    )
    parser.add_argument(
        '--to_safetensors',
        action='store_true',
        help='Whether to store pipeline in safetensors format or not.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')

    # Small workaround so argparse can parse an explicit 'True'/'False' value.
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")

    parser.add_argument(
        '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
    )

    parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)

    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )

    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 68 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
# Module logger; the config class below reads `logger`.
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Configuration class for DPT (dense prediction transformer) models.

    Stores the ViT backbone hyper-parameters, the DPT neck/fusion settings and the
    optional BiT hybrid-backbone configuration.
    """

    # Identifier used by the AutoConfig machinery (read via `self.__class__.model_type`).
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            # Hybrid mode embeds the image through a BiT convolutional backbone.
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            # The hybrid variant only supports the "project" readout.
            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """Serialize this instance to a plain dict (nested backbone config as a dict)."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 68 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (same pattern as the other configuration modules in this tree).
logger = logging.get_logger(__name__)

# Map of pretrained checkpoint names to their hosted config files.
snake_case_ = {
    'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
    'umberto-commoncrawl-cased-v1': (
        'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
    ),
    'umberto-wikipedia-uncased-v1': (
        'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
    ),
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Configuration class for CamemBERT models (RoBERTa-style BERT architecture)."""

    # Identifier used by the AutoConfig machinery.
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are forwarded to the base config; everything else is stored here.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """ONNX export configuration for CamemBERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the exported model's inputs.

        Multiple-choice tasks carry an extra "choice" axis between batch and sequence.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 68 | 1 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __lowerCamelCase() -> None:
    """Raise the RuntimeError message that accelerate treats as a CUDA OOM signal.

    Always raises, so the return annotation is None (the original `List[str]`
    annotation referenced a name that was never imported).
    """
    raise RuntimeError("CUDA out of memory.")


# Name used by the test case below to simulate OOM failures.
raise_fake_out_of_memory = __lowerCamelCase
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Tiny Linear -> BatchNorm1d -> Linear model used to allocate some CUDA memory in tests."""

    def __init__(self):
        super().__init__()
        # Two distinct linear layers (the obfuscated version collapsed both onto one
        # attribute name) and a working BatchNorm1d (nn has no `BatchNormad`).
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        # 3 -> 4 (normalized) -> 5
        return self.linear2(self.batchnorm(self.linear1(x)))


# Name the test case below uses to instantiate this model.
ModelForTest = SCREAMING_SNAKE_CASE__
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Tests for accelerate's `find_executable_batch_size` and `release_memory`.

    Distinct test names restore unittest discovery (the obfuscated version gave
    every method the same name, so later defs shadowed earlier ones).
    """

    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        # The decorator halves the batch size after each simulated OOM.
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        # Passing batch_size explicitly is a usage error the decorator reports as TypeError.
        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        # Non-OOM errors must propagate unchanged.
        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
| 68 |
'''simple docstring'''
def __lowerCamelCase(numbers: list[int]) -> list[int]:
    """Sort `numbers` in place using exchange sort and return the same list.

    O(n^2): for each position, swap in any later element that is smaller.
    """
    length = len(numbers)
    for i in range(length):
        for j in range(i + 1, length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


# Name used by the __main__ demo below.
exchange_sort = __lowerCamelCase
if __name__ == "__main__":
snake_case_ = input('Enter numbers separated by a comma:\n').strip()
snake_case_ = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 68 | 1 |
'''simple docstring'''
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : list[list[int | float]] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = len(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(matrix[0] )
SCREAMING_SNAKE_CASE_ : Tuple = min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for row in range(SCREAMING_SNAKE_CASE_ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = matrix[col][row] / matrix[row][row]
for i in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
SCREAMING_SNAKE_CASE_ : List[str] = True
for i in range(row + 1 , SCREAMING_SNAKE_CASE_ ):
if matrix[i][row] != 0:
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : int = matrix[i], matrix[row]
SCREAMING_SNAKE_CASE_ : Optional[int] = False
break
if reduce:
rank -= 1
for i in range(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ : List[str] = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 68 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)

# Root logger; a stdout handler is attached to it further below.
logger = logging.getLogger()
def __lowerCamelCase(path: Path, articles: list) -> None:
    """Write `articles` joined by newlines to `path`, overwriting any existing file."""
    content = "\n".join(articles)
    # Use a context manager so the file handle is closed deterministically.
    with Path(path).open("w") as f:
        f.writelines(content)


# Name used by the test case below.
_dump_articles = __lowerCamelCase
# Tiny checkpoints keep these end-to-end tests fast.
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'

# Echo log output to stdout so test runners capture it.
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """End-to-end tests for run_eval / run_eval_search on tiny models."""

    def run_eval_tester(self, model):
        """Run the evaluation script for `model` on a one-article dummy input file."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n ".split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        # Quick non-slow smoke test on one model.
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"\n run_eval_search.py\n {model}\n {str(input_file_name)}\n {str(output_file_name)}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n ".split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            # The search report must show the grid header, the model name and the winner...
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            # ...but no raw "Info" logging noise.
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 68 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

# Checkpoint-relative names of the vocabulary/tokenizer files (read via the class below).
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

# Hosted files for each pretrained DistilBERT checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
        'distilbert-base-uncased-distilled-squad': (
            'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
        ),
        'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
        'distilbert-base-cased-distilled-squad': (
            'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
        ),
        'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
        'distilbert-base-multilingual-cased': (
            'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
        'distilbert-base-uncased-distilled-squad': (
            'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
        ),
        'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
        'distilbert-base-cased-distilled-squad': (
            'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
        ),
        'distilbert-base-german-cased': (
            'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
        ),
        'distilbert-base-multilingual-cased': (
            'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
        ),
    },
}

# Maximum input lengths for each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'distilbert-base-uncased': 5_1_2,
    'distilbert-base-uncased-distilled-squad': 5_1_2,
    'distilbert-base-cased': 5_1_2,
    'distilbert-base-cased-distilled-squad': 5_1_2,
    'distilbert-base-german-cased': 5_1_2,
    'distilbert-base-multilingual-cased': 5_1_2,
}

# Per-checkpoint tokenizer defaults.
PRETRAINED_INIT_CONFIGURATION = {
    'distilbert-base-uncased': {'do_lower_case': True},
    'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
    'distilbert-base-cased': {'do_lower_case': False},
    'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
    'distilbert-base-german-cased': {'do_lower_case': False},
    'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Fast (Rust-backed) DistilBERT tokenizer.

    Class attributes use the PreTrainedTokenizerFast hook names (the obfuscated
    version collapsed them all onto `_A`, so only the last one survived).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-sync the backend normalizer when saved options disagree with the requested ones.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (+ B [SEP] when a second sequence is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Token type ids: zeros for the first sequence, ones for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 68 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def __lowerCamelCase(
    coefficient_matrix: NDArray[np.float64],
    constant_matrix: NDArray[np.float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve Ax = b approximately with the Jacobi iteration method.

    Args:
        coefficient_matrix: n x n matrix A (must be strictly diagonally dominant).
        constant_matrix: n x 1 matrix b.
        init_val: initial guess for x, one value per row.
        iterations: number of Jacobi sweeps to perform (>= 1).

    Returns:
        The approximate solution vector as a list of floats.

    Raises:
        ValueError: on any dimension mismatch, non-positive iteration count,
            or a coefficient matrix that is not strictly diagonally dominant.
    """
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    # Augmented matrix [A | b].
    table = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


# Conventional public name for this solver.
jacobi_iteration_method = __lowerCamelCase
def __lowerCamelCase(table: NDArray[np.float64]) -> bool:
    """Check that the coefficient part of the augmented matrix `table` is
    strictly diagonally dominant.

    The last column (the constants) is excluded from the row sums.

    Raises:
        ValueError: if any diagonal entry does not strictly exceed the sum of
            the other coefficients in its row.
    """
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Name used by the Jacobi solver above.
strictly_diagonally_dominant = __lowerCamelCase
# Test Cases
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 68 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
snake_case_ = logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """TensorFlow-specific benchmark arguments (TPU/GPU/CPU strategy selection)."""

    # Legacy negated flags that map onto the positive fields of the base class.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated `no_*` kwargs into their positive counterparts,
        pop the TF-only options, then initialize the base arguments."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                # Strip the "no_" prefix and invert the value.
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        """Resolve the TPU cluster once; returns None when no TPU is available."""
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        """Build the tf.distribute strategy for TPU, single GPU or CPU."""
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self):
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self):
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 68 |
'''simple docstring'''
def __lowerCamelCase(input_1: int, input_2: int) -> int:
    """XNOR logic gate: 1 when both inputs are equal, otherwise 0."""
    return 1 if input_1 == input_2 else 0


# Public name used by the self-test and the demo below.
xnor_gate = __lowerCamelCase
def __lowerCamelCase ( ) -> None:
    """Exercise the full XNOR truth table (expects `xnor_gate` at module level)."""
    assert xnor_gate(0 , 0 ) == 1
    assert xnor_gate(0 , 1 ) == 0
    assert xnor_gate(1 , 0 ) == 0
    assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 68 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece's meta-symbol marking a leading space.
SPIECE_UNDERLINE = '▁'

# Checkpoint-relative name of the SentencePiece model file (read via the class below).
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

# Hosted files for each pretrained NLLB checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/nllb-200-distilled-600M': (
            'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
        ),
    }
}

# Maximum input lengths for each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/nllb-200-distilled-600M': 1_0_2_4,
}
# fmt: off
snake_case_ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 
'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """SentencePiece tokenizer with fairseq-style id alignment and NLLB-like
    language-code handling (source/target language special tokens, prefix and
    suffix token management, vocabulary save/restore).

    NOTE(review): identifiers in this block are machine-mangled — the six
    ``_A`` class attributes shadow one another, several ``def`` signatures
    repeat the placeholder name ``lowercase__`` (a SyntaxError as written),
    and assignments target the placeholder ``SCREAMING_SNAKE_CASE_`` while
    later lines read the original names. Restore the upstream names before
    this class can run; comments below describe only what the visible code
    does.
    """
    # Mangled class attributes: each ``_A`` overwrites the previous one.
    # Presumably vocab_files_names, max_model_input_sizes,
    # pretrained_vocab_files_map, model_input_names, prefix_tokens,
    # suffix_tokens — TODO confirm against upstream.
    _A = VOCAB_FILES_NAMES
    _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _A = PRETRAINED_VOCAB_FILES_MAP
    _A = ["input_ids", "attention_mask"]
    _A = []
    _A = []
    def __init__( self , lowercase__ , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__ = None , lowercase__=None , lowercase__=False , **lowercase__ , ):
        """Load the SentencePiece model and build fairseq-aligned vocab maps.

        NOTE(review): the positional parameters are all mangled to
        ``lowercase__``; from the defaults and the body they are presumably
        vocab_file, bos/eos/sep/cls/unk/pad/mask tokens, tokenizer_file,
        src_lang, tgt_lang, sp_model_kwargs, additional_special_tokens and
        legacy_behaviour — confirm against upstream.
        """
        # Wrap the mask token in AddedToken when passed as a plain string.
        SCREAMING_SNAKE_CASE_ : List[Any] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
        SCREAMING_SNAKE_CASE_ : str = {} if sp_model_kwargs is None else sp_model_kwargs
        SCREAMING_SNAKE_CASE_ : Optional[Any] = legacy_behaviour
        super().__init__(
            bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , tokenizer_file=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=lowercase__ , **lowercase__ , )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(lowercase__ ) )
        SCREAMING_SNAKE_CASE_ : Tuple = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 token
        SCREAMING_SNAKE_CASE_ : Any = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        SCREAMING_SNAKE_CASE_ : Dict = 1
        SCREAMING_SNAKE_CASE_ : int = len(self.sp_model )
        # Language codes are appended after the SentencePiece vocabulary,
        # shifted by the fairseq offset.
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase__ )
        }
        SCREAMING_SNAKE_CASE_ : Optional[Any] = {v: k for k, v in self.lang_code_to_id.items()}
        SCREAMING_SNAKE_CASE_ : List[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        SCREAMING_SNAKE_CASE_ : Tuple = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        # Default to English source when no source language was supplied.
        SCREAMING_SNAKE_CASE_ : Dict = src_lang if src_lang is not None else "eng_Latn"
        SCREAMING_SNAKE_CASE_ : List[Any] = self.lang_code_to_id[self._src_lang]
        SCREAMING_SNAKE_CASE_ : List[Any] = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self ):
        """Drop the unpicklable SentencePiece handle; keep its serialized proto."""
        SCREAMING_SNAKE_CASE_ : str = self.__dict__.copy()
        SCREAMING_SNAKE_CASE_ : Optional[Any] = None
        SCREAMING_SNAKE_CASE_ : Any = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , lowercase__ ):
        """Restore pickled state and rebuild the SentencePiece processor."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            SCREAMING_SNAKE_CASE_ : str = {}
        SCREAMING_SNAKE_CASE_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def __lowerCamelCase ( self ):
        """Vocabulary size: SP pieces + language codes + fairseq offset + mask."""
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
    @property
    def __lowerCamelCase ( self ):
        """Current source-language code (e.g. ``"eng_Latn"``)."""
        return self._src_lang
    @src_lang.setter
    def __lowerCamelCase ( self , lowercase__ ):
        """Change the source language and refresh its special tokens."""
        SCREAMING_SNAKE_CASE_ : Optional[Any] = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __lowerCamelCase ( self , lowercase__ , lowercase__ = None , lowercase__ = False ):
        """Return a mask where 1 marks a special token (prefix/suffix specials).

        NOTE(review): the super() call below passes ``token_ids_a`` twice — a
        mangling artifact; upstream presumably passes token_ids_0/token_ids_1.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__ )
        SCREAMING_SNAKE_CASE_ : List[str] = [1] * len(self.prefix_tokens )
        SCREAMING_SNAKE_CASE_ : Any = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(lowercase__ )) + suffix_ones
        return prefix_ones + ([0] * len(lowercase__ )) + ([0] * len(lowercase__ )) + suffix_ones
    def __lowerCamelCase ( self , lowercase__ , lowercase__ = None ):
        """Add prefix/suffix special tokens around one (or two) sequences."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def __lowerCamelCase ( self , lowercase__ , lowercase__ = None ):
        """Return an all-zero token-type-id list (token types are unused here)."""
        SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
        SCREAMING_SNAKE_CASE_ : str = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ):
        """Tokenize translation inputs and attach the target-language id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        SCREAMING_SNAKE_CASE_ : List[Any] = src_lang
        SCREAMING_SNAKE_CASE_ : Tuple = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
        SCREAMING_SNAKE_CASE_ : List[Any] = self.convert_tokens_to_ids(lowercase__ )
        SCREAMING_SNAKE_CASE_ : str = tgt_lang_id
        return inputs
    def __lowerCamelCase ( self ):
        """Return the full token -> id mapping, including added tokens."""
        SCREAMING_SNAKE_CASE_ : Optional[Any] = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __lowerCamelCase ( self , lowercase__ ):
        """Tokenize text into SentencePiece pieces."""
        return self.sp_model.encode(lowercase__ , out_type=lowercase__ )
    def __lowerCamelCase ( self , lowercase__ ):
        """Convert a token to its id, honoring the fairseq-aligned specials."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        SCREAMING_SNAKE_CASE_ : List[Any] = self.sp_model.PieceToId(lowercase__ )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def __lowerCamelCase ( self , lowercase__ ):
        """Convert an id back to its token, honoring the fairseq offset."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def __lowerCamelCase ( self , lowercase__ ):
        """Join pieces into a string, replacing the SP word marker with spaces.

        NOTE(review): the ``replace`` argument is mangled; upstream it is
        presumably the SentencePiece underline character — confirm.
        """
        SCREAMING_SNAKE_CASE_ : Optional[Any] = "".join(lowercase__ ).replace(lowercase__ , " " ).strip()
        return out_string
    def __lowerCamelCase ( self , lowercase__ , lowercase__ = None ):
        """Write the SentencePiece model file into ``save_directory``."""
        if not os.path.isdir(lowercase__ ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(
            lowercase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        # Copy the original file when it exists elsewhere; otherwise dump the
        # in-memory serialized model.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowercase__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowercase__ , "wb" ) as fi:
                SCREAMING_SNAKE_CASE_ : List[str] = self.sp_model.serialized_model_proto()
                fi.write(lowercase__ )
        return (out_vocab_file,)
    def __lowerCamelCase ( self , lowercase__ , lowercase__ = "eng_Latn" , lowercase__ = None , lowercase__ = "fra_Latn" , **lowercase__ , ):
        """Set src/tgt languages, then delegate to the base seq2seq batcher."""
        SCREAMING_SNAKE_CASE_ : str = src_lang
        SCREAMING_SNAKE_CASE_ : List[Any] = tgt_lang
        return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
    def __lowerCamelCase ( self ):
        """Switch special tokens to input (source-language) mode."""
        return self.set_src_lang_special_tokens(self.src_lang )
    def __lowerCamelCase ( self ):
        """Switch special tokens to target-language mode."""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def __lowerCamelCase ( self , lowercase__ ):
        """Set prefix/suffix specials for the source language.

        Legacy mode: no prefix, suffix = [eos, lang_code].
        New mode:    prefix = [lang_code], suffix = [eos].
        """
        SCREAMING_SNAKE_CASE_ : Any = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            SCREAMING_SNAKE_CASE_ : Dict = []
            SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
        else:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.cur_lang_code]
            SCREAMING_SNAKE_CASE_ : Dict = [self.eos_token_id]
    def __lowerCamelCase ( self , lowercase__ ):
        """Set prefix/suffix specials for the target language (same layout as
        the source-language variant above)."""
        SCREAMING_SNAKE_CASE_ : Any = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            SCREAMING_SNAKE_CASE_ : List[str] = []
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
        else:
            SCREAMING_SNAKE_CASE_ : Tuple = [self.cur_lang_code]
            SCREAMING_SNAKE_CASE_ : List[str] = [self.eos_token_id]
| 68 |
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Return the Hubble parameter H(z) for an FLRW universe.

    Uses the Friedmann equation
        E(z) = Or*(1+z)**4 + Om*(1+z)**3 + Ok*(1+z)**2 + OL
        H(z) = H0 * sqrt(E(z))
    where the curvature density Ok is inferred from the closure relation
    Ok = 1 - (Om + Or + OL).

    Args:
        hubble_constant: present-day Hubble constant H0 (e.g. km/s/Mpc).
        radiation_density: relative radiation density Or, in [0, 1].
        matter_density: relative matter density Om, in [0, 1].
        dark_energy: relative dark-energy density OL, in [0, 1].
        redshift: redshift z (>= 0).

    Returns:
        H(z) in the same units as ``hubble_constant``.

    Raises:
        ValueError: if any parameter is negative, or a relative density
            exceeds one.
    """
    # NOTE: the original signature declared five identically named parameters
    # (a SyntaxError); names are restored from the body and the demo call.
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    # Curvature follows from the requirement that the densities sum to one.
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_z = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    return hubble_constant * e_z ** (1 / 2)
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation: dark energy closes the density budget.
    # Fix: this constant was bound to a mangled name (`snake_case_`) while
    # the call below reads `matter_density`, raising NameError.
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 68 | 1 |
'''simple docstring'''
import math
def main() -> None:
    """Interactively encrypt or decrypt a message with a columnar transposition cipher.

    Prompts for the message, the key (number of columns), and the mode, then
    prints the result with a trailing pipe so trailing spaces stay visible.

    Fix: this function was defined under a mangled name but is invoked as
    ``main()`` by the module guard below; the name is restored here.
    """
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")
    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)
    else:
        # Previously an unrecognized mode crashed later with a NameError.
        raise ValueError("Mode must start with 'e' (encrypt) or 'd' (decrypt)")
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")
def encrypt_message(key: int, message: str) -> str:
    """Encrypt *message* with a columnar transposition cipher of *key* columns.

    Column ``c`` of the ciphertext collects message characters at indices
    c, c+key, c+2*key, ...; the columns are then concatenated.

    Fix: the original ``def`` declared two identically named parameters
    (a SyntaxError); names are restored from the body.

    >>> encrypt_message(6, "Harshil Darji")
    'Hlia rDsahrij'
    """
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key: int, message: str) -> str:
    """Decrypt a columnar-transposition ciphertext produced with the same *key*.

    Reconstructs the transposition grid: ``num_cols`` columns of up to
    ``num_rows`` characters, where the last ``num_shaded_boxes`` grid cells
    were unused during encryption.

    Fix: the original ``def`` declared two identically named parameters
    (a SyntaxError); names are restored from the body.

    >>> decrypt_message(6, "Hlia rDsahrij")
    'Harshil Darji'
    """
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        # Wrap to the next row at the end of a full row, or one column early
        # once we reach the rows containing a shaded (unused) box.
        # (Parentheses added for clarity; `or` already bound looser than `and`.)
        if (col == num_cols) or (
            (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
    import doctest

    # Check the docstring examples first, then run the interactive tool.
    doctest.testmod()
    main()
| 68 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Unit tests for generation's DisjunctiveConstraint: constructor input
    validation, stepwise progress tracking, and reset behaviour.

    NOTE(review): local names are mangled to ``SCREAMING_SNAKE_CASE_`` /
    ``lowercase__`` while later lines read the original names (``dc``, …);
    the code cannot run as written — restore names before use.
    """
    def __lowerCamelCase ( self ):
        """Valid nested int lists are accepted; tensors and lists of tensors raise."""
        SCREAMING_SNAKE_CASE_ : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]]
        SCREAMING_SNAKE_CASE_ : Any = DisjunctiveConstraint(lowercase__ )
        self.assertTrue(isinstance(dc.token_ids , lowercase__ ) )
        with self.assertRaises(lowercase__ ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(lowercase__ ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def __lowerCamelCase ( self ):
        """A branch that is a strict prefix of another branch is rejected."""
        SCREAMING_SNAKE_CASE_ : List[Any] = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(lowercase__ ):
            DisjunctiveConstraint(lowercase__ ) # fails here
    def __lowerCamelCase ( self ):
        """Stepping tokens 1, 2, 3 progresses and completes the [1, 2, 3] branch."""
        SCREAMING_SNAKE_CASE_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
        SCREAMING_SNAKE_CASE_ : Optional[Any] = DisjunctiveConstraint(lowercase__ )
        # update() returns a (stepped, completed, reset) triple.
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = dc.update(1 )
        SCREAMING_SNAKE_CASE_ : Optional[int] = stepped is True and completed is False and reset is False
        self.assertTrue(lowercase__ )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Tuple = dc.update(2 )
        SCREAMING_SNAKE_CASE_ : Tuple = stepped is True and completed is False and reset is False
        self.assertTrue(lowercase__ )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Any = dc.update(3 )
        SCREAMING_SNAKE_CASE_ : Tuple = stepped is True and completed is True and reset is False
        self.assertTrue(lowercase__ )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def __lowerCamelCase ( self ):
        """Stepping [1, 2, 4, 5] completes; after reset(), [1, 2, 5] also completes."""
        SCREAMING_SNAKE_CASE_ : List[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        SCREAMING_SNAKE_CASE_ : Dict = DisjunctiveConstraint(lowercase__ )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : List[Any] = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : List[Any] = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        # After a reset the constraint tracks a fresh sequence.
        dc.reset()
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Tuple = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : str = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : str = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 68 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case_ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w(h: int, w: int, scale_factor: int = 8) -> tuple[int, int]:
    """Round an image size up to the latent grid, then rescale to pixel units.

    Each dimension is divided by ``scale_factor**2`` (rounded up) and then
    multiplied by ``scale_factor``, i.e. the result is the smallest multiple
    of ``scale_factor`` whose square-scaled grid covers the input.

    Fix: the original ``def`` declared three identically named parameters
    (a SyntaxError) with mangled type hints; names restored from the body
    and the call site (``get_new_h_w(h, w, self.movq_scale_factor)``).

    Args:
        h: requested image height in pixels.
        w: requested image width in pixels.
        scale_factor: spatial down-scaling factor of the latent space.

    Returns:
        (new_h, new_w) latent-compatible height and width.
    """
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1  # round up so the latent grid covers the full height
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1  # round up so the latent grid covers the full width
    return new_h * scale_factor, new_w * scale_factor
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Kandinsky-style text-to-image diffusion pipeline: a multilingual text
    encoder conditions a UNet that iteratively denoises latents, which a MoVQ
    model then decodes into images.

    NOTE(review): identifiers are machine-mangled — parameters are all named
    ``lowercase__`` (duplicates are a SyntaxError as written) and assignments
    target ``SCREAMING_SNAKE_CASE_`` while later lines read the original
    names. Restore upstream names before this class can run; comments below
    describe only what the visible code does.
    """
    def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
        """Register the pipeline modules.

        NOTE(review): parameter order is presumably text_encoder, tokenizer,
        unet, scheduler, movq, matching the register_modules call — confirm.
        """
        super().__init__()
        self.register_modules(
            text_encoder=lowercase__ , tokenizer=lowercase__ , unet=lowercase__ , scheduler=lowercase__ , movq=lowercase__ , )
        # Spatial down-scaling factor implied by the MoVQ encoder depth.
        SCREAMING_SNAKE_CASE_ : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Draw initial latents (or validate supplied ones) and scale them by
        the scheduler's initial noise sigma."""
        if latents is None:
            SCREAMING_SNAKE_CASE_ : Tuple = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__ )
        else:
            if latents.shape != shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            SCREAMING_SNAKE_CASE_ : List[str] = latents.to(lowercase__ )
        SCREAMING_SNAKE_CASE_ : int = latents * scheduler.init_noise_sigma
        return latents
    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=None , ):
        """Encode the prompt (and, under classifier-free guidance, the negative
        prompt), repeating embeddings once per requested image, and return
        (prompt_embeds, text_encoder_hidden_states, text_mask)."""
        SCREAMING_SNAKE_CASE_ : int = len(lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else 1
        # get prompt text embeddings
        SCREAMING_SNAKE_CASE_ : Any = self.tokenizer(
            lowercase__ , padding="max_length" , truncation=lowercase__ , max_length=77 , return_attention_mask=lowercase__ , add_special_tokens=lowercase__ , return_tensors="pt" , )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = text_inputs.input_ids
        SCREAMING_SNAKE_CASE_ : int = self.tokenizer(lowercase__ , padding="longest" , return_tensors="pt" ).input_ids
        # Warn the user about the part of the prompt lost to the 77-token cap.
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(lowercase__ , lowercase__ ):
            SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_input_ids.to(lowercase__ )
        SCREAMING_SNAKE_CASE_ : Tuple = text_inputs.attention_mask.to(lowercase__ )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = self.text_encoder(
            input_ids=lowercase__ , attention_mask=lowercase__ )
        # One copy of the embeddings per image requested for this prompt.
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = prompt_embeds.repeat_interleave(lowercase__ , dim=0 )
        SCREAMING_SNAKE_CASE_ : Tuple = text_encoder_hidden_states.repeat_interleave(lowercase__ , dim=0 )
        SCREAMING_SNAKE_CASE_ : Optional[int] = text_mask.repeat_interleave(lowercase__ , dim=0 )
        if do_classifier_free_guidance:
            SCREAMING_SNAKE_CASE_ : List[str]
            # Normalize negative_prompt into a list matching the batch size.
            if negative_prompt is None:
                SCREAMING_SNAKE_CASE_ : int = [""] * batch_size
            elif type(lowercase__ ) is not type(lowercase__ ):
                raise TypeError(
                    F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase__ )} !="
                    F" {type(lowercase__ )}." )
            elif isinstance(lowercase__ , lowercase__ ):
                SCREAMING_SNAKE_CASE_ : List[Any] = [negative_prompt]
            elif batch_size != len(lowercase__ ):
                raise ValueError(
                    F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase__ )}, but `prompt`:"
                    F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`." )
            else:
                SCREAMING_SNAKE_CASE_ : Optional[Any] = negative_prompt
            SCREAMING_SNAKE_CASE_ : str = self.tokenizer(
                lowercase__ , padding="max_length" , max_length=77 , truncation=lowercase__ , return_attention_mask=lowercase__ , add_special_tokens=lowercase__ , return_tensors="pt" , )
            SCREAMING_SNAKE_CASE_ : List[Any] = uncond_input.input_ids.to(lowercase__ )
            SCREAMING_SNAKE_CASE_ : Optional[int] = uncond_input.attention_mask.to(lowercase__ )
            SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.text_encoder(
                input_ids=lowercase__ , attention_mask=lowercase__ )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            SCREAMING_SNAKE_CASE_ : List[Any] = negative_prompt_embeds.shape[1]
            SCREAMING_SNAKE_CASE_ : Dict = negative_prompt_embeds.repeat(1 , lowercase__ )
            SCREAMING_SNAKE_CASE_ : Tuple = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowercase__ )
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = uncond_text_encoder_hidden_states.shape[1]
            SCREAMING_SNAKE_CASE_ : Tuple = uncond_text_encoder_hidden_states.repeat(1 , lowercase__ , 1 )
            SCREAMING_SNAKE_CASE_ : str = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt , lowercase__ , -1 )
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = uncond_text_mask.repeat_interleave(lowercase__ , dim=0 )
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            SCREAMING_SNAKE_CASE_ : str = torch.cat([negative_prompt_embeds, prompt_embeds] )
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
            SCREAMING_SNAKE_CASE_ : Dict = torch.cat([uncond_text_mask, text_mask] )
        return prompt_embeds, text_encoder_hidden_states, text_mask
    def __lowerCamelCase ( self , lowercase__=0 ):
        """Offload unet/text_encoder/movq to CPU via accelerate's cpu_offload,
        moving submodules to the GPU only while they execute."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.device(F"cuda:{gpu_id}" )
        SCREAMING_SNAKE_CASE_ : Optional[int] = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(lowercase__ , lowercase__ )
    def __lowerCamelCase ( self , lowercase__=0 ):
        """Model-level CPU offload with hooks (accelerate >= 0.17): each model
        is kept on CPU and moved to the GPU only for its forward pass."""
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        SCREAMING_SNAKE_CASE_ : List[str] = torch.device(F"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=lowercase__ )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : List[Any] = cpu_offload_with_hook(lowercase__ , lowercase__ , prev_module_hook=lowercase__ )
        if self.safety_checker is not None:
            SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[Any] = cpu_offload_with_hook(self.safety_checker , lowercase__ , prev_module_hook=lowercase__ )
        # We'll offload the last model manually.
        SCREAMING_SNAKE_CASE_ : int = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def __lowerCamelCase ( self ):
        """Device the pipeline executes on, honoring accelerate offload hooks."""
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(lowercase__ , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(lowercase__ )
    def __call__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = 512 , lowercase__ = 512 , lowercase__ = 100 , lowercase__ = 4.0 , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , ):
        """Run the denoising loop and decode the final latents into images.

        Steps: encode prompt(s), prepare image embeds and initial latents,
        iterate the scheduler timesteps with classifier-free guidance, decode
        through MoVQ, and post-process to the requested output type.
        """
        # Normalize `prompt` to a batch and remember its size.
        if isinstance(lowercase__ , lowercase__ ):
            SCREAMING_SNAKE_CASE_ : List[str] = 1
        elif isinstance(lowercase__ , lowercase__ ):
            SCREAMING_SNAKE_CASE_ : List[str] = len(lowercase__ )
        else:
            raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase__ )}" )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = self._execution_device
        SCREAMING_SNAKE_CASE_ : Dict = batch_size * num_images_per_prompt
        # Guidance scale > 1 enables classifier-free guidance.
        SCREAMING_SNAKE_CASE_ : Optional[int] = guidance_scale > 1.0
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : str = self._encode_prompt(
            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
        if isinstance(lowercase__ , lowercase__ ):
            SCREAMING_SNAKE_CASE_ : int = torch.cat(lowercase__ , dim=0 )
        if isinstance(lowercase__ , lowercase__ ):
            SCREAMING_SNAKE_CASE_ : List[Any] = torch.cat(lowercase__ , dim=0 )
        if do_classifier_free_guidance:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_embeds.repeat_interleave(lowercase__ , dim=0 )
            SCREAMING_SNAKE_CASE_ : List[str] = negative_image_embeds.repeat_interleave(lowercase__ , dim=0 )
            SCREAMING_SNAKE_CASE_ : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
                dtype=prompt_embeds.dtype , device=lowercase__ )
        self.scheduler.set_timesteps(lowercase__ , device=lowercase__ )
        SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler.timesteps
        SCREAMING_SNAKE_CASE_ : Tuple = self.unet.config.in_channels
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = get_new_h_w(lowercase__ , lowercase__ , self.movq_scale_factor )
        # create initial latent
        SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , lowercase__ , lowercase__ , lowercase__ , self.scheduler , )
        for i, t in enumerate(self.progress_bar(lowercase__ ) ):
            # expand the latents if we are doing classifier free guidance
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            SCREAMING_SNAKE_CASE_ : List[Any] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            SCREAMING_SNAKE_CASE_ : Dict = self.unet(
                sample=lowercase__ , timestep=lowercase__ , encoder_hidden_states=lowercase__ , added_cond_kwargs=lowercase__ , return_dict=lowercase__ , )[0]
            if do_classifier_free_guidance:
                # Split predicted noise and variance, combine the two guidance
                # halves, and keep the text-conditioned variance prediction.
                SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
                SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Union[str, Any] = noise_pred.chunk(2 )
                SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : int = variance_pred.chunk(2 )
                SCREAMING_SNAKE_CASE_ : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                SCREAMING_SNAKE_CASE_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler does not consume a learned variance: drop it.
                SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            SCREAMING_SNAKE_CASE_ : str = self.scheduler.step(
                lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ , ).prev_sample
        # post-processing
        SCREAMING_SNAKE_CASE_ : Optional[Any] = self.movq.decode(lowercase__ , force_not_quantize=lowercase__ )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = image * 0.5 + 0.5
            SCREAMING_SNAKE_CASE_ : int = image.clamp(0 , 1 )
            SCREAMING_SNAKE_CASE_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            SCREAMING_SNAKE_CASE_ : Dict = self.numpy_to_pil(lowercase__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=lowercase__ )
| 68 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase,_UpperCAmelCase,unittest.TestCase ):
    """Unit tests for the VQModel autoencoder: common model-tester mixin hooks
    plus pretrained-checkpoint loading and deterministic-output checks.

    NOTE(review): identifiers are machine-mangled — both ``_A`` assignments
    shadow each other (presumably ``model_class`` and ``main_input_name``),
    and locals are assigned to placeholders while later lines read the
    original names; restore names before running.
    """
    _A = VQModel
    _A = "sample"
    @property
    def __lowerCamelCase ( self , lowercase__=(32, 32) ):
        """Build a dummy batch: a random (4, 3, H, W) image tensor on the test device."""
        SCREAMING_SNAKE_CASE_ : str = 4
        SCREAMING_SNAKE_CASE_ : str = 3
        SCREAMING_SNAKE_CASE_ : str = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase__ )
        return {"sample": image}
    @property
    def __lowerCamelCase ( self ):
        """Expected model input shape (C, H, W)."""
        return (3, 32, 32)
    @property
    def __lowerCamelCase ( self ):
        """Expected model output shape (C, H, W)."""
        return (3, 32, 32)
    def __lowerCamelCase ( self ):
        """Return (init_kwargs, input_kwargs) for constructing a small VQModel."""
        SCREAMING_SNAKE_CASE_ : Dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        SCREAMING_SNAKE_CASE_ : int = self.dummy_input
        return init_dict, inputs_dict
    def __lowerCamelCase ( self ):
        """No-op override — presumably disables an inherited mixin test; confirm."""
        pass
    def __lowerCamelCase ( self ):
        """No-op override — presumably disables an inherited mixin test; confirm."""
        pass
    def __lowerCamelCase ( self ):
        """Loading the dummy pretrained checkpoint reports no missing keys and runs."""
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : int = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=lowercase__ )
        self.assertIsNotNone(lowercase__ )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(lowercase__ )
        SCREAMING_SNAKE_CASE_ : Optional[int] = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def __lowerCamelCase ( self ):
        """Pretrained model output matches a recorded slice (seeded, no grad)."""
        SCREAMING_SNAKE_CASE_ : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" )
        model.to(lowercase__ ).eval()
        # Seed CPU and (when present) GPU RNGs for reproducibility.
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        SCREAMING_SNAKE_CASE_ : str = image.to(lowercase__ )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ : int = model(lowercase__ ).sample
        # Compare a small corner slice against the recorded reference values.
        SCREAMING_SNAKE_CASE_ : Any = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
        # fmt: on
        self.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3 ) )
| 68 | 1 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
snake_case_ = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    """Runs doctests over the library's modules and documentation sources."""

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[str, List[str], None] = None,
        ignore_files: Union[List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run doctests for every file in `directory` that matches the filters.

        Args:
            directory: directory in which to look for files.
            identifier: will only analyze files containing this substring.
            n_identifier: will *not* analyze files containing this substring
                (or any substring in the list).
            ignore_files: extra file names to skip (``__init__.py`` is always skipped).
            only_modules: if True, resolve each file as a ``transformers`` attribute
                and run its doctest suite; otherwise run ``doctest.testfile`` directly.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)
            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 68 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class LoggingTester(unittest.TestCase):
    """Exercises transformers' centralized logging verbosity helpers."""

    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error" )
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level, current_level, f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}", )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error" )
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
    """Progress-bar enable/disable toggles must round-trip through the global flag."""
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 68 | 1 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    """CONNECTION_TIMES_OUT mode: plain requests hang (simulated), timed requests time out."""
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    """CONNECTION_FAILS mode: every request raises a ConnectionError."""
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")
def test_offline_with_datasets_offline_mode_enabled():
    """HF_DATASETS_OFFLINE=1: datasets' own HTTP helpers must refuse to connect."""
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 700 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    """Build a tiny model/optimizer/scheduler/dataloader set for Accelerator tests.

    Returns:
        (model, optimizer, scheduler, train_dl, valid_dl)
    """
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl
def get_signature(model):
    """Cheap scalar fingerprint of a linear model's parameters (sum of absolute values)."""
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def load_random_weights(model):
    """Overwrite `model`'s parameters with freshly initialized random weights of the same shape."""
    new_state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(new_state)
class AcceleratorTester(AccelerateTestCase):
    """Integration tests for the core `Accelerator` object."""

    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        # mutating the accelerator must be reflected in the shared GradientState
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        """Tests that custom PyTorch device is set via ACCELERATE_TORCH_DEVICE."""
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")

    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        """Tests that `None` objects pass through `prepare` untouched."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj)
        self.assertTrue(dummy_obj is None)

    def test_is_accelerator_prepared(self):
        """Checks the `_is_accelerate_prepared` marker on prepared objects."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj)
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False), False, "Dummy object should have `_is_accelerate_prepared` set to `True`", )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False), True, "Model is missing `_is_accelerator_prepared` or is set to `False`", )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False), True, "Optimizer is missing `_is_accelerator_prepared` or is set to `False`", )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False), True, "Scheduler is missing `_is_accelerator_prepared` or is set to `False`", )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False), True, "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`", )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False), True, "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`", )

    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """A bitsandbytes 8-bit model on a single device can be prepared."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map={"": 0}, )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Preparing an 8-bit model with CPU-offloaded modules must raise."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m", )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True)

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """Preparing an 8-bit model sharded over multiple GPUs under MULTI_GPU must raise."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m", )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)
        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        """Without a distributed setup, a multi-device 8-bit model can be prepared."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m", )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(model)
| 68 | 0 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

# model_type -> feature extractor class name. Order matters for docs generation.
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
        ('beit', 'BeitFeatureExtractor'),
        ('chinese_clip', 'ChineseCLIPFeatureExtractor'),
        ('clap', 'ClapFeatureExtractor'),
        ('clip', 'CLIPFeatureExtractor'),
        ('clipseg', 'ViTFeatureExtractor'),
        ('conditional_detr', 'ConditionalDetrFeatureExtractor'),
        ('convnext', 'ConvNextFeatureExtractor'),
        ('cvt', 'ConvNextFeatureExtractor'),
        ('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
        ('data2vec-vision', 'BeitFeatureExtractor'),
        ('deformable_detr', 'DeformableDetrFeatureExtractor'),
        ('deit', 'DeiTFeatureExtractor'),
        ('detr', 'DetrFeatureExtractor'),
        ('dinat', 'ViTFeatureExtractor'),
        ('donut-swin', 'DonutFeatureExtractor'),
        ('dpt', 'DPTFeatureExtractor'),
        ('encodec', 'EncodecFeatureExtractor'),
        ('flava', 'FlavaFeatureExtractor'),
        ('glpn', 'GLPNFeatureExtractor'),
        ('groupvit', 'CLIPFeatureExtractor'),
        ('hubert', 'Wav2Vec2FeatureExtractor'),
        ('imagegpt', 'ImageGPTFeatureExtractor'),
        ('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
        ('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
        ('levit', 'LevitFeatureExtractor'),
        ('maskformer', 'MaskFormerFeatureExtractor'),
        ('mctct', 'MCTCTFeatureExtractor'),
        ('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
        ('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
        ('mobilevit', 'MobileViTFeatureExtractor'),
        ('nat', 'ViTFeatureExtractor'),
        ('owlvit', 'OwlViTFeatureExtractor'),
        ('perceiver', 'PerceiverFeatureExtractor'),
        ('poolformer', 'PoolFormerFeatureExtractor'),
        ('regnet', 'ConvNextFeatureExtractor'),
        ('resnet', 'ConvNextFeatureExtractor'),
        ('segformer', 'SegformerFeatureExtractor'),
        ('sew', 'Wav2Vec2FeatureExtractor'),
        ('sew-d', 'Wav2Vec2FeatureExtractor'),
        ('speech_to_text', 'Speech2TextFeatureExtractor'),
        ('speecht5', 'SpeechT5FeatureExtractor'),
        ('swiftformer', 'ViTFeatureExtractor'),
        ('swin', 'ViTFeatureExtractor'),
        ('swinv2', 'ViTFeatureExtractor'),
        ('table-transformer', 'DetrFeatureExtractor'),
        ('timesformer', 'VideoMAEFeatureExtractor'),
        ('tvlt', 'TvltFeatureExtractor'),
        ('unispeech', 'Wav2Vec2FeatureExtractor'),
        ('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
        ('van', 'ConvNextFeatureExtractor'),
        ('videomae', 'VideoMAEFeatureExtractor'),
        ('vilt', 'ViltFeatureExtractor'),
        ('vit', 'ViTFeatureExtractor'),
        ('vit_mae', 'ViTFeatureExtractor'),
        ('vit_msn', 'ViTFeatureExtractor'),
        ('wav2vec2', 'Wav2Vec2FeatureExtractor'),
        ('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
        ('wavlm', 'Wav2Vec2FeatureExtractor'),
        ('whisper', 'WhisperFeatureExtractor'),
        ('xclip', 'CLIPFeatureExtractor'),
        ('yolos', 'YolosFeatureExtractor'),
    ]
)

# Lazy config-class -> feature-extractor-class mapping used by AutoFeatureExtractor.
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature extractor class object from its class name, or return None."""
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the feature extractor configuration (as a dict) from a model repo or directory.

    Returns an empty dict when no feature extractor config file is found.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead.")
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    """Factory that instantiates the correct feature extractor class for a checkpoint.

    Cannot be instantiated directly; use :meth:`from_pretrained`.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.")

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate a feature extractor class from a pretrained model name or path."""
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            F"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            F"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}" )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
| 701 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical X-MOD checkpoint -> config URL mapping.
XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
    'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
    'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
    'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
    'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
    'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
    'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
    'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
    'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    """Configuration class for X-MOD models.

    Stores the architecture hyper-parameters plus the X-MOD-specific adapter
    settings (reduction factor, layer-norm handling, supported languages).
    """

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # X-MOD adapter-specific settings
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    @property
    def __lowerCamelCase ( self ):
        """ONNX input specification: maps each model input to its dynamic axes.

        Multiple-choice tasks carry an extra "choice" axis between the batch
        and sequence axes; every other task has only batch and sequence.
        """
        axis_names = ["batch", "choice", "sequence"] if self.task == "multiple-choice" else ["batch", "sequence"]
        dynamic_axis = dict(enumerate(axis_names))
        return OrderedDict((input_name, dynamic_axis) for input_name in ("input_ids", "attention_mask"))
| 68 | 0 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def __lowerCamelCase ( ) -> None:
    """Entry point: generate a 1024-bit RSA key pair into ``rsa_*.txt`` files.

    NOTE(review): ``make_key_files`` is not defined under that name in this
    file (the key-writing function below was renamed by obfuscation) —
    confirm the intended target before running.
    """
    print("Making key files..." )
    make_key_files("rsa" , 1_0_2_4 )
    print("Key files generation successful." )
def __lowerCamelCase ( key_size: int ) -> tuple[tuple[int, int], tuple[int, int]]:
    """Generate an RSA key pair of the given bit size.

    Returns ``((n, e), (n, d))`` — the public and private keys.

    NOTE(review): the original declared its parameter under a placeholder name
    while the body read ``_SCREAMING_SNAKE_CASE`` / ``key_size`` (a NameError
    at runtime); the parameter name is restored from the body's own usage.
    """
    print("Generating prime p..." )
    p = rabinMiller.generate_large_prime(key_size )
    print("Generating prime q..." )
    q = rabinMiller.generate_large_prime(key_size )
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
    while True:
        # candidate public exponents are drawn until one is coprime with phi(n)
        e = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
        if cryptoMath.gcd(e , (p - 1) * (q - 1) ) == 1:
            break

    print("Calculating d that is mod inverse of e..." )
    d = cryptoMath.find_mod_inverse(e , (p - 1) * (q - 1) )

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def __lowerCamelCase ( name: str , key_size: int ) -> None:
    """Generate an RSA key pair and write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``.

    Refuses to overwrite existing key files and exits the program instead.

    NOTE(review): the original declared both parameters with the same
    placeholder name (a SyntaxError); names are reconstructed from the body.
    ``generate_key`` must resolve to the key-pair generator defined above in
    this module — confirm the restored name after de-obfuscation.
    """
    if os.path.exists(F"{name}_pubkey.txt" ) or os.path.exists(F"{name}_privkey.txt" ):
        print("\nWARNING:" )
        print(
            F"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            "Use a different name or delete these files and re-run this program." )
        sys.exit()

    public_key, private_key = generate_key(key_size )

    print(F"\nWriting public key to file {name}_pubkey.txt..." )
    with open(F"{name}_pubkey.txt" , "w" ) as out_file:
        out_file.write(F"{key_size},{public_key[0]},{public_key[1]}" )

    print(F"Writing private key to file {name}_privkey.txt..." )
    with open(F"{name}_privkey.txt" , "w" ) as out_file:
        out_file.write(F"{key_size},{private_key[0]},{private_key[1]}" )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined under that name in this file —
    # the entry-point function above was renamed by obfuscation; confirm
    # the intended target before running.
    main()
| 702 |
'''simple docstring'''
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> int:
"""simple docstring"""
return int((input_a, input_a).count(1 ) != 0 )
def __lowerCamelCase ( ) -> None:
    """Self-test: the OR gate must return 1 iff at least one input is 1.

    NOTE(review): ``or_gate`` is not defined under that name in this file —
    the gate function above was renamed by obfuscation; confirm before running.
    """
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
    # Print the truth table of the OR gate.
    # NOTE(review): ``or_gate`` is not defined under that name in this file
    # (the gate function was renamed by obfuscation).
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 68 | 0 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def __lowerCamelCase ( tmpdir ) -> None:
    """A second FileLock on the same path must time out while the first is held.

    NOTE(review): restored from the obfuscated dump — the original shadowed
    both locks under one name, passed a placeholder to ``pytest.raises``
    (should be ``Timeout``), and never defined ``_start``/``timeout``.
    """
    lock_a = FileLock(str(tmpdir / "foo.lock" ) )
    lock_b = FileLock(str(tmpdir / "foo.lock" ) )
    timeout = 0.01
    with lock_a.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lock_b.acquire(timeout )
    # the failed acquire must have waited at least the requested timeout
    assert time.time() - _start > timeout
def __lowerCamelCase ( tmpdir ) -> None:
    """Over-long lock filenames must be hashed down to an OS-safe length.

    NOTE(review): restored from the obfuscated dump — the original shadowed
    both locks under one name and passed a placeholder to ``pytest.raises``
    (should be ``Timeout``).
    """
    filename = "a" * 1_0_0_0 + ".lock"
    lock_a = FileLock(str(tmpdir / filename ) )
    # the lock file keeps the .lock suffix but not the oversized name
    assert lock_a._lock_file.endswith(".lock" )
    assert not lock_a._lock_file.endswith(filename )
    # must fit within typical filesystem name limits
    assert len(os.path.basename(lock_a._lock_file ) ) <= 2_5_5
    lock_b = FileLock(tmpdir / filename )
    with lock_a.acquire():
        with pytest.raises(Timeout ):
            lock_b.acquire(0 )
| 703 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Configuration class for the DPT (Dense Prediction Transformer) model.

    NOTE(review): the original ``__init__`` declared every parameter with the
    same placeholder name (a ``SyntaxError``); the names below are
    reconstructed from the attribute assignments in the body.
    """

    # model-type identifier used by the config registry
    _A = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,                 # prepend a convolutional (BiT) backbone to the ViT
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs )

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone." )
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , dict ):
                logger.info("Initializing the config with a `BiT` backbone." )
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , PretrainedConfig ):
                pass  # already a fully-built config object
            else:
                raise ValueError(
                    F"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}." )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." )
        else:
            # non-hybrid DPT has no convolutional backbone
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices

        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual

        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def __lowerCamelCase ( self ):
        """Serialize this config (and any nested backbone config) to a plain dict."""
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 68 | 0 |
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __lowerCamelCase ( xlm_checkpoint_path , pytorch_dump_folder_path ):
    """Convert an official XLM checkpoint into HF weight/config/vocab files.

    NOTE(review): the original declared both parameters with the same
    placeholder name (a SyntaxError) and referenced ``_lowercase``; names are
    reconstructed from the body and the argparse flags below.
    ``WEIGHTS_NAME`` / ``CONFIG_NAME`` / ``VOCAB_FILES_NAMES`` come from the
    transformers imports at the top of this file.
    """
    chkpt = torch.load(xlm_checkpoint_path , map_location="cpu" )

    state_dict = chkpt['model']

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt['params']
    # drop tensor-valued entries: only plain hyper-parameters go into the JSON config
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}

    vocab = chkpt['dico_word2id']
    # re-create fastBPE-style tokens: word-final marker for plain words, strip "@@" continuations
    vocab = {s + '</w>' if s.find("@@" ) == -1 and i > 1_3 else s.replace("@@" , "" ): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']

    print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )

    print(F"Save configuration file to {pytorch_config_dump_path}" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(config , indent=2 ) + "\n" )

    # fixed: the original printed the config path here instead of the vocab path
    print(F"Save vocab file to {pytorch_vocab_dump_path}" )
    with open(pytorch_vocab_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(vocab , indent=2 ) + "\n" )
if __name__ == "__main__":
    # fixed: the original bound the parser/args to ``snake_case_`` while the
    # following lines read ``parser``/``args`` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    # the converter is bound to the module-level name ``__lowerCamelCase`` in
    # this obfuscated dump, so call it through that name
    __lowerCamelCase(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 704 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase ):
    """Helper that builds image-processor kwargs and random test images.

    NOTE(review): restored from the obfuscated dump — the original declared
    every ``__init__`` parameter with one placeholder name (a SyntaxError),
    bound both methods to the same name (the first was shadowed), used the
    non-existent dtype ``np.uinta``, and was referenced by the test classes
    below as ``ChineseCLIPImageProcessingTester``; the class and method names
    are restored to match those call sites.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self ):
        """Return the kwargs dict used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self , equal_resolution=False , numpify=False , torchify=False ):
        """Create a batch of random images as PIL images, numpy arrays, or torch tensors."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size ):
                image_inputs.append(
                    np.random.randint(
                        255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
        else:
            image_inputs = []
            for i in range(self.batch_size ):
                width, height = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
                image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x ) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Tests for ChineseCLIPImageProcessor with 3-channel inputs.

    NOTE(review): restored from the obfuscated dump — every method was bound
    to the same placeholder name (so unittest would discover nothing), local
    variables were replaced by undefined placeholders, and the base mixin was
    an undefined alias; names are reconstructed from the attribute accesses
    in the bodies and the imports at the top of this file.
    """

    # the test bodies read ``self.image_processing_class``
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self ):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , do_center_crop=True )

    @property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "do_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "center_crop" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_convert_rgb" ) )

    def test_image_processor_from_dict_with_kwargs(self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )

    def test_batch_feature(self ):
        pass

    def test_call_pil(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def test_call_numpy(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def test_call_pytorch(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Tests for ChineseCLIPImageProcessor with 4-channel (RGBA) inputs.

    NOTE(review): restored from the obfuscated dump — every method was bound
    to the same placeholder name, local variables were undefined placeholders,
    and the base mixin was an undefined alias; names are reconstructed from
    the attribute accesses in the bodies (``self.image_processing_class``,
    ``self.expected_encoded_image_num_channels``).
    """

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self ):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=True )
        # RGBA inputs are converted to RGB, so the processor emits 3 channels
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "do_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "center_crop" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_convert_rgb" ) )

    def test_batch_feature(self ):
        pass

    def test_call_pil_four_channels(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 68 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class SCREAMING_SNAKE_CASE__ ( TaskTemplate ):
    """Task template describing extractive question answering.

    NOTE(review): restored from the obfuscated dump — the decorator argument
    and base class were undefined placeholders, and every field had been
    collapsed to one name while the property below reads
    ``question_column``/``context_column``/``answers_column``; the field names
    are reconstructed from those accesses.
    """

    # task identifier; kept in asdict() output even when it equals the default
    task: str = field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True} )
    # schemas are class-level constants, not per-instance dataclass fields
    input_schema: ClassVar[Features] = Features({"question": Value("string" ), "context": Value("string" )} )
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string" ),
                    "answer_start": Value("int32" ),
                } )
        } )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self ) -> Dict[str, str]:
        """Map this template's dataset column names to the canonical QA columns."""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 705 |
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = str(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) == 9 and set(SCREAMING_SNAKE_CASE_ ) == set("123456789" )
def __lowerCamelCase ( ) -> int | None:
"""simple docstring"""
for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
SCREAMING_SNAKE_CASE_ : int = 1_0_0_0_0_2 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
for base_num in range(3_3_3 , 9_9 , -1 ):
SCREAMING_SNAKE_CASE_ : List[str] = 1_0_0_2_0_0_3 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
return None
if __name__ == "__main__":
    # NOTE(review): in this obfuscated dump the solver is bound to the
    # module-level name ``__lowerCamelCase`` (the last definition wins),
    # so call it through that name instead of the undefined ``solution``.
    print(F'''{__lowerCamelCase() = }''')
| 68 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __lowerCamelCase ( model_name ):
    """Build the HF UperNet+ConvNeXt config matching the given checkpoint name.

    NOTE(review): restored from the obfuscated dump — the declared parameter
    never matched the ``model_name`` the body reads, and several call
    arguments were undefined placeholders. The ``id2label``/``label2id``
    keyword names are reconstructed from the obfuscated ``idalabel``/
    ``labelaid`` — confirm against the UperNetConfig API.
    """
    auxiliary_in_channels = 3_8_4
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [9_6, 1_9_2, 3_8_4, 7_6_8]
    if "small" in model_name:
        depths = [3, 3, 2_7, 3]
        hidden_sizes = [9_6, 1_9_2, 3_8_4, 7_6_8]
    if "base" in model_name:
        depths = [3, 3, 2_7, 3]
        hidden_sizes = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4]
        auxiliary_in_channels = 5_1_2
    if "large" in model_name:
        depths = [3, 3, 2_7, 3]
        hidden_sizes = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6]
        auxiliary_in_channels = 7_6_8
    if "xlarge" in model_name:
        depths = [3, 3, 2_7, 3]
        hidden_sizes = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8]
        auxiliary_in_channels = 1_0_2_4

    # set label information (ADE20k, 150 semantic classes)
    num_labels = 1_5_0
    repo_id = 'huggingface/label-files'
    filename = 'ade20k-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}

    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=["stage1", "stage2", "stage3", "stage4"] )
    config = UperNetConfig(
        backbone_config=backbone_config ,
        auxiliary_in_channels=auxiliary_in_channels ,
        num_labels=num_labels ,
        id2label=idalabel ,
        label2id=labelaid ,
    )

    return config
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = dct.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = val
def __lowerCamelCase ( model_name , pytorch_dump_folder_path , push_to_hub ):
    """Download an mmsegmentation UperNet+ConvNeXt checkpoint, convert it to
    the HF format, verify the logits on a sample image, and optionally save
    and/or push the result.

    NOTE(review): restored from the obfuscated dump — the original declared
    all three parameters with one placeholder name (a SyntaxError) and passed
    undefined placeholders everywhere. ``get_upernet_config`` /
    ``create_rename_keys`` / ``rename_key`` refer to the helper functions
    above under their restored names — confirm after de-obfuscation.
    """
    model_name_to_url = {
        'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
        'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
        'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
        'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
        'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )['state_dict']

    config = __lowerCamelCase_config = get_upernet_config(model_name ) if False else get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("bn" , "batch_norm" )
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    model.load_state_dict(state_dict )

    # verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )

    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors="pt" ).pixel_values

    with torch.no_grad():
        outputs = model(pixel_values )

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
    print("Logits:" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 )
    print("Looks ok!" )

    if pytorch_dump_folder_path is not None:
        print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        print(F"Pushing model and processor for {model_name} to hub" )
        model.push_to_hub(F"openmmlab/{model_name}" )
        processor.push_to_hub(F"openmmlab/{model_name}" )
if __name__ == "__main__":
    # fixed: the original bound the parser/args to ``snake_case_`` while the
    # following lines read ``parser``/``args`` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='upernet-convnext-tiny',
        type=str,
        choices=[F'''upernet-convnext-{size}''' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
        help='Name of the ConvNext UperNet model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    # the converter is bound to the module-level name ``__lowerCamelCase`` in
    # this obfuscated dump (the last definition wins), so call it through that
    # name instead of the undefined ``convert_upernet_checkpoint``
    __lowerCamelCase(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 706 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ ( AbstractDatasetReader ):
    """Dataset reader that materializes a PySpark DataFrame as a 🤗 dataset.

    NOTE(review): the original (mangled) version declared duplicate
    ``lowercase__`` parameters (a SyntaxError) and bound the constructor
    arguments to throwaway locals, so the read method dereferenced
    ``self._load_from_cache_file`` / ``self._file_format`` / ``self.builder``
    that were never assigned.  Parameter names are reconstructed from the
    signature's default values, and the undefined base name
    ``_UpperCAmelCase`` is resolved to the imported ``AbstractDatasetReader``.
    """

    def __init__( self , df , split = None , features = None , streaming = True , cache_dir = None , keep_in_memory = False , working_dir = None , load_from_cache_file = True , file_format = "arrow" , **kwargs , ):
        """Configure the reader and the underlying ``Spark`` builder.

        Args:
            df: the ``pyspark.sql.DataFrame`` to read from.
            split: optional split of the produced dataset.
            features: optional explicit ``Features`` schema.
            streaming: if True, the read method returns a streaming dataset.
            cache_dir: directory where the builder caches prepared data.
            keep_in_memory: keep the dataset in RAM instead of memory-mapping.
            working_dir: scratch directory used by the Spark builder.
            load_from_cache_file: reuse a previously prepared cache when True.
            file_format: on-disk format produced by the builder ("arrow").
        """
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )

    def __lowerCamelCase ( self ):
        """Build (or stream) the dataset and return it for ``self.split``."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        # FORCE_REDOWNLOAD discards any existing cache when reuse is disabled.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
| 68 | 0 |
'''simple docstring'''
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number ``2**p - 1``.

    Only meaningful for prime exponents ``p``; ``p == 2`` is answered directly
    (2**2 - 1 == 3 is prime).

    Args:
        SCREAMING_SNAKE_CASE_: the exponent ``p`` (must be >= 2).

    Returns:
        True iff ``2**p - 1`` is a Mersenne prime.

    Raises:
        ValueError: if ``p < 2``.
    """
    # Bug fix: the original body referenced an undefined name `p` although the
    # (mangled) parameter is SCREAMING_SNAKE_CASE_; bind it explicitly so the
    # public signature stays unchanged.
    p = SCREAMING_SNAKE_CASE_
    if p < 2:
        raise ValueError("p should not be less than 2!" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1  # the Mersenne number 2**p - 1
    # Iterate s_{i+1} = (s_i^2 - 2) mod M; 2**p - 1 is prime iff s_{p-2} == 0.
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    # Bug fix: the guard called the undefined name `lucas_lehmer_test`;
    # the function above is (mangled to) `__lowerCamelCase` in this module.
    print(__lowerCamelCase(7))
    print(__lowerCamelCase(1_1))
| 707 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
snake_case_ = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
snake_case_ = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = 
datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
snake_case_ = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    """Exact-match metric: percentage of predictions identical to references.

    NOTE(review): the original (mangled) compute method declared duplicate
    ``lowercase__`` parameters (a SyntaxError) and bound every normalization
    result to a throwaway local, so the ``ignore_*`` options had no effect.
    Parameter names and the reassignments are reconstructed below.  Both
    methods share the mangled name ``__lowerCamelCase`` (upstream they are
    ``_info`` and ``_compute``), so the second definition shadows the first —
    verify the names in the restored module.
    """

    def __lowerCamelCase ( self ):
        """Declare the metric's metadata: two aligned string sequences."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) , reference_urls=[] , )

    def __lowerCamelCase ( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        """Compute the exact-match rate in [0.0, 100.0] after optional normalization.

        Args:
            predictions: list of predicted strings.
            references: list of reference strings (same length).
            regexes_to_ignore: regex patterns stripped from both sides first.
            ignore_case: lowercase both sides before comparing.
            ignore_punctuation: strip all ASCII punctuation before comparing.
            ignore_numbers: strip all ASCII digits before comparing.

        Returns:
            ``{"exact_match": <percentage of exact matches>}``.
        """
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , "" , x ) for x in predictions] )
                references = np.array([re.sub(s , "" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            # Translation table that deletes every ASCII punctuation character.
            repl_table = string.punctuation.maketrans("" , "" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans("" , "" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 68 | 0 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class SCREAMING_SNAKE_CASE__ ( ABC ):
    """Abstract base for readers that turn on-disk files into a 🤗 dataset.

    NOTE(review): the original (mangled) ``__init__`` declared duplicate
    ``lowercase__`` parameters (a SyntaxError) and bound the arguments to
    throwaway locals; parameter names and the ``self.*`` assignments are
    reconstructed from the defaults, and the undefined base name ``__A`` is
    resolved to the imported ``ABC``.
    """

    def __init__( self , path_or_paths = None , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ):
        """Store reader configuration; actual reading happens in the subclass hook.

        Args:
            path_or_paths: file path(s), or a mapping of split name -> path(s).
            split: split to read; defaults to "train" unless a mapping is given.
            features: optional explicit ``Features`` schema.
            cache_dir: cache directory for prepared data.
            keep_in_memory: keep the dataset in RAM instead of memory-mapping.
            streaming: return an iterable (streaming) dataset when True.
            num_proc: number of processes used for preparation.
        """
        self.path_or_paths = path_or_paths
        # A dict of paths already carries its own split names, so leave `split`
        # as passed (possibly None) in that case; otherwise default to "train".
        self.split = split if split or isinstance(path_or_paths , dict ) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def __lowerCamelCase ( self ):
        """Subclass hook: build and return the dataset."""
        pass
class SCREAMING_SNAKE_CASE__ ( ABC ):
    """Abstract base for readers that wrap an in-memory input object/stream.

    NOTE(review): the original (mangled) ``__init__`` declared duplicate
    ``lowercase__`` parameters (a SyntaxError) and bound the arguments to
    throwaway locals; parameter names and the ``self.*`` assignments are
    reconstructed from the defaults, and the undefined base name ``__A`` is
    resolved to the imported ``ABC``.
    """

    def __init__( self , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ):
        """Store configuration shared by in-memory dataset readers.

        Args:
            features: optional explicit ``Features`` schema.
            cache_dir: cache directory for prepared data.
            keep_in_memory: keep the dataset in RAM instead of memory-mapping.
            streaming: return an iterable (streaming) dataset when True.
            num_proc: number of processes used for preparation.
        """
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def __lowerCamelCase ( self ):
        """Subclass hook: build and return the dataset."""
        pass
| 708 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level tokenizer tables for the GPT-SW3 checkpoints.
# NOTE(review): all four assignments below reuse the mangled name
# `snake_case_`, so each one overwrites the previous; upstream these are
# `logger`, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — verify before relying on them.
snake_case_ = logging.get_logger(__name__)
# Filename of the SentencePiece model inside a checkpoint directory.
snake_case_ = {'vocab_file': 'spiece.model'}
# Hub download URL of the SentencePiece model for each checkpoint size.
snake_case_ = {
    'vocab_file': {
        'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
    }
}
# Maximum model input length (positional embedding size) per checkpoint.
snake_case_ = {
    'AI-Sweden/gpt-sw3-126m': 2_0_4_8,
    'AI-Sweden/gpt-sw3-350m': 2_0_4_8,
    'AI-Sweden/gpt-sw3-1.6b': 2_0_4_8,
    'AI-Sweden/gpt-sw3-6.7b': 2_0_4_8,
    'AI-Sweden/gpt-sw3-20b': 2_0_4_8,
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """SentencePiece-based tokenizer for the GPT-SW3 family of checkpoints.

    NOTE(review): this block was machine-mangled — several methods declare
    duplicate ``lowercase__`` parameters (a SyntaxError), bodies read names
    the mangled signatures no longer bind (e.g. ``sp_model_kwargs``,
    ``name_or_path``), many methods share the name ``__lowerCamelCase`` (the
    last definition wins), and the base name ``_UpperCAmelCase`` is unresolved
    here.  Comments describe the intended upstream behavior
    (``GPTSw3Tokenizer``); verify against the original before relying on them.
    """

    # Upstream these four bind vocab_files_names / pretrained_vocab_files_map /
    # max_model_input_sizes / model_input_names; here they all reuse `_A`,
    # each assignment overwriting the previous one.
    _A = VOCAB_FILES_NAMES
    _A = PRETRAINED_VOCAB_FILES_MAP
    _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _A = ["input_ids", "attention_mask"]

    def __init__( self , lowercase__ , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__ = None , **lowercase__ , ):
        """Load the SentencePiece model and configure the special tokens.

        Upstream parameter order: vocab_file, do_lower_case, remove_space,
        keep_accents, pad_token, unk_token, eos_token, bos_token,
        sp_model_kwargs, **kwargs (mangled to duplicate names here).
        """
        SCREAMING_SNAKE_CASE_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
        SCREAMING_SNAKE_CASE_ : Dict = kwargs.get("name_or_path" )
        if name_or_path is None:
            # Without a model name the 7b-specific token defaults below cannot
            # be selected; warn and fall back to a placeholder.
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored" )
            SCREAMING_SNAKE_CASE_ : str = "None"
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        SCREAMING_SNAKE_CASE_ : List[Any] = "<|endoftext|>" if eos_token is None else eos_token
        SCREAMING_SNAKE_CASE_ : Dict = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            # The 7b checkpoint has no dedicated pad/bos tokens; reuse unk/eos.
            SCREAMING_SNAKE_CASE_ : Tuple = unk_token if pad_token is None else pad_token
            SCREAMING_SNAKE_CASE_ : Optional[Any] = eos_token if bos_token is None else bos_token
        else:
            SCREAMING_SNAKE_CASE_ : int = "<pad>" if pad_token is None else pad_token
            SCREAMING_SNAKE_CASE_ : Any = "<s>" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=lowercase__ , remove_space=lowercase__ , keep_accents=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , **lowercase__ , )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = do_lower_case
        SCREAMING_SNAKE_CASE_ : Optional[int] = remove_space
        SCREAMING_SNAKE_CASE_ : int = keep_accents
        SCREAMING_SNAKE_CASE_ : List[Any] = vocab_file
        SCREAMING_SNAKE_CASE_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(lowercase__ )
        # Used for whitespace normalization in input texts
        # fmt : off
        SCREAMING_SNAKE_CASE_ : int = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        SCREAMING_SNAKE_CASE_ : List[str] = re.compile(
            F"[{''.join(map(lowercase__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]" )

    def __getstate__( self ):
        """Drop the unpicklable SentencePiece processor before pickling."""
        SCREAMING_SNAKE_CASE_ : Tuple = self.__dict__.copy()
        SCREAMING_SNAKE_CASE_ : Dict = None
        # NOTE(review): `state` is the mangled name of the copied dict above.
        return state

    def __setstate__( self , lowercase__ ):
        """Restore pickled state and re-load the SentencePiece model from disk."""
        SCREAMING_SNAKE_CASE_ : List[str] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
        SCREAMING_SNAKE_CASE_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def __lowerCamelCase ( self ):
        """Size of the SentencePiece vocabulary."""
        return len(self.sp_model )

    def __lowerCamelCase ( self , lowercase__ ):
        """Normalize raw text: strip non-printing chars, unify whitespace, NFC."""
        SCREAMING_SNAKE_CASE_ : List[str] = self.non_printing_characters_re.sub("" , lowercase__ )
        # Normalize whitespaces
        SCREAMING_SNAKE_CASE_ : List[str] = "".join([char if char not in self.whitespaces else " " for char in text] )
        # NFC Unicode normalization
        SCREAMING_SNAKE_CASE_ : List[Any] = unicodedata.normalize("NFC" , lowercase__ )
        return text

    def __lowerCamelCase ( self , lowercase__ , **lowercase__ ):
        """Tokenize: preprocess the text, then run SentencePiece (string pieces)."""
        SCREAMING_SNAKE_CASE_ : Dict = self.preprocess_text(lowercase__ )
        return self.sp_model.encode(lowercase__ , out_type=lowercase__ )

    def __lowerCamelCase ( self , lowercase__ ):
        """Map a token string to its vocabulary id."""
        return self.sp_model.PieceToId(lowercase__ )

    def __lowerCamelCase ( self , lowercase__ ):
        """Map a vocabulary id back to its token string."""
        return self.sp_model.IdToPiece(lowercase__ )

    @staticmethod
    def __lowerCamelCase ( lowercase__ ):
        """Identity pass-through (upstream: convert_tokens_to_string)."""
        return out_string

    def __lowerCamelCase ( self , lowercase__ ):
        """Decode tokens to text, handling special tokens outside SentencePiece."""
        SCREAMING_SNAKE_CASE_ : Optional[Any] = []
        SCREAMING_SNAKE_CASE_ : Any = ""
        SCREAMING_SNAKE_CASE_ : Dict = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(lowercase__ ) + token
                SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
                SCREAMING_SNAKE_CASE_ : int = []
            else:
                current_sub_tokens.append(lowercase__ )
                SCREAMING_SNAKE_CASE_ : List[str] = False
        out_string += self.sp_model.decode(lowercase__ )
        return out_string

    def __lowerCamelCase ( self ):
        """Return {token: id} for the full vocab plus any added tokens."""
        SCREAMING_SNAKE_CASE_ : Optional[int] = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __lowerCamelCase ( self , lowercase__ , lowercase__ = None ):
        """Save the SentencePiece model into *save_directory* (copy or serialize)."""
        if not os.path.isdir(lowercase__ ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        SCREAMING_SNAKE_CASE_ : Any = os.path.join(
            lowercase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        # Copy the existing on-disk model when available; otherwise dump the
        # in-memory serialized proto.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowercase__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowercase__ , "wb" ) as fi:
                SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.sp_model.serialized_model_proto()
                fi.write(lowercase__ )
        return (out_vocab_file,)

    def __lowerCamelCase ( self , lowercase__ , lowercase__ = False ):
        """Fast encode: single string or list of strings to ids (optionally a torch tensor)."""
        if isinstance(lowercase__ , lowercase__ ):
            SCREAMING_SNAKE_CASE_ : List[Any] = self.preprocess_text(lowercase__ )
            SCREAMING_SNAKE_CASE_ : Any = self.sp_model.encode(lowercase__ )
        else:
            SCREAMING_SNAKE_CASE_ : str = [self.preprocess_text(lowercase__ ) for t in text]
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.sp_model.encode(lowercase__ )
        if return_tensors is True or return_tensors == "pt":
            SCREAMING_SNAKE_CASE_ : str = torch.tensor(lowercase__ )
        return token_ids

    def __lowerCamelCase ( self , lowercase__ ):
        """Fast decode: ids straight through SentencePiece."""
        return self.sp_model.decode(lowercase__ )

    def __lowerCamelCase ( self , lowercase__ ):
        """Serialize a chat ``Conversation`` into the GPT-SW3 prompt format and encode it."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = [F"User: {text}" if is_user else F"Bot: {text}" for is_user, text in conversation.iter_texts()]
        SCREAMING_SNAKE_CASE_ : List[str] = (
            F"{self.eos_token}{self.bos_token}" + F"{self.bos_token}".join(lowercase__ ) + F"{self.bos_token}Bot:"
        )
        return self.encode(text=lowercase__ )
| 68 | 0 |
'''simple docstring'''
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : list[list[float]] ) -> list[list[float]]:
    """Transpose rows of raw source data into per-attribute column lists.

    Bug fix: the original body referenced the undefined names ``source_data``
    / ``__lowerCAmelCase`` (mangling artifacts); the parameter is bound
    explicitly and the wrong ``-> int`` annotation is corrected.

    Args:
        SCREAMING_SNAKE_CASE_: rows of values, one list per record.

    Returns:
        One list per attribute (column), with every value cast to float.
    """
    source_data = SCREAMING_SNAKE_CASE_
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data ):
            # Grow the column list lazily the first time column i is seen.
            if len(data_lists ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(el ) )
    return data_lists
def __lowerCamelCase ( data_lists : list[list[float]] , weights : list[int] ) -> list[list[float]]:
    """Min-max normalize each attribute column according to its weight.

    Bug fix: the original declared two parameters with the same mangled name
    (a SyntaxError) and the body referenced the undefined ``__lowerCAmelCase``;
    distinct parameter names are reconstructed from the body's usage.

    Args:
        data_lists: one list of values per attribute (column).
        weights: one weight per column — 0 means "lower is better"
            (score = 1 - normalized), 1 means "higher is better".

    Returns:
        One normalized score list per column, values in [0, 1].

    Raises:
        ValueError: if any weight is neither 0 nor 1.
    """
    score_lists = []
    for dlist, weight in zip(data_lists , weights ):
        mind = min(dlist )
        maxd = max(dlist )
        score = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    # All values equal: best possible score for this column.
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            raise ValueError(f"Invalid weight of {weight:f} provided" )
        score_lists.append(score )
    return score_lists
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : list[list[float]] ) -> list[float]:
    """Sum the per-attribute score lists column-wise into one score per record.

    Bug fixes: the original body referenced the undefined names
    ``score_lists`` / ``__lowerCAmelCase`` (mangling artifacts), and raised
    IndexError on empty input; an empty input now yields ``[]``.

    Args:
        SCREAMING_SNAKE_CASE_: one score list per attribute, all equal length.

    Returns:
        ``final_scores[j] = sum(score_lists[i][j] for all i)``.
    """
    score_lists = SCREAMING_SNAKE_CASE_
    if not score_lists:
        return []
    final_scores = [0 for _ in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(slist ):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def __lowerCamelCase ( source_data : list[list[float]] , weights : list[int] ) -> list[list[float]]:
    """Score every record of *source_data* and append its final score in place.

    Bug fix: the original declared two parameters with the same mangled name
    (a SyntaxError); distinct names are reconstructed from the body's usage.

    NOTE(review): the helpers called below (``get_data``,
    ``calculate_each_score``, ``generate_final_scores``) are the sibling
    functions in this module, whose definitions were mangled to
    ``__lowerCamelCase`` in this dump — confirm the names resolve in the
    restored module.

    Args:
        source_data: rows of raw attribute values; mutated in place.
        weights: one 0/1 weight per attribute column.

    Returns:
        *source_data*, with each row's combined score appended to it.
    """
    data_lists = get_data(source_data )
    score_lists = calculate_each_score(data_lists , weights )
    final_scores = generate_final_scores(score_lists )
    # append scores to source data
    for i, ele in enumerate(final_scores ):
        source_data[i].append(ele )
    return source_data
| 709 |
'''simple docstring'''
import re
from filelock import FileLock
# Detect NLTK at import time; downstream code asserts on this flag rather
# than failing hard, so missing NLTK is a deferred error.
try:
    import nltk

    snake_case_ = True
except (ImportError, ModuleNotFoundError):
    snake_case_ = False

# Fetch the sentence tokenizer data once, under a file lock so concurrent
# workers don't race on the shared NLTK data directory.
# Bug fix: the original tested the undefined name `NLTK_AVAILABLE`; the
# availability flag bound above is the (mangled) name `snake_case_`.
if snake_case_:
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str ) -> str:
    """Split the input text into one sentence per line using NLTK.

    Upstream this exists so rougeLsum scores match published rougeL numbers
    for BART and PEGASUS.

    Args:
        SCREAMING_SNAKE_CASE_: the document text to split.

    Returns:
        The sentences joined with newlines.

    Raises:
        AssertionError: if NLTK was not importable at module load time.
    """
    # Bug fix: re.sub returns a new string and the original discarded the
    # result, so the pegasus "<n>" marker was never actually removed.
    SCREAMING_SNAKE_CASE_ = re.sub("<n>" , "" , SCREAMING_SNAKE_CASE_ )  # remove pegasus newline char
    # Bug fix: the availability flag is the (mangled) module global
    # `snake_case_`; `NLTK_AVAILABLE` is undefined in this module.
    assert snake_case_, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE_ ) )
| 68 | 0 |
'''simple docstring'''
def __lowerCamelCase ( ) -> str:
    """Return the last ten digits of ``sum(i**i for i in 1..1000)`` (Project Euler 48).

    Bug fixes: the original accumulated into the undefined name ``total``,
    returned via another undefined name, and was annotated ``-> Tuple``
    although ``Tuple`` is not imported (and the result is a string).
    Three-argument ``pow`` keeps only the last ten digits per term, avoiding
    the ~3000-digit intermediate powers.
    """
    modulus = 10**10
    total = sum(pow(i , i , modulus ) for i in range(1 , 1_0_0_1 ) ) % modulus
    # zfill preserves the fixed 10-character width of the original string slice.
    return str(total ).zfill(10 )


if __name__ == "__main__":
    # Bug fix: the guard called the undefined name `solution`; the function
    # above is (mangled to) `__lowerCamelCase` in this module.
    print(__lowerCamelCase())
| 710 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Test harness that builds tiny LayoutLMv3 configs and dummy TF inputs.

    NOTE(review): this block was machine-mangled — every multi-argument method
    declares duplicate ``lowercase__`` parameters (a SyntaxError) and the
    constructor binds its arguments to throwaway ``SCREAMING_SNAKE_CASE_``
    locals instead of ``self.*`` attributes.  Comments describe the intended
    upstream behavior (the TF LayoutLMv3 model tester); verify against the
    original before relying on details.
    """

    def __init__( self , lowercase__ , lowercase__=2 , lowercase__=3 , lowercase__=4 , lowercase__=2 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=99 , lowercase__=36 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=16 , lowercase__=2 , lowercase__=0.02 , lowercase__=6 , lowercase__=6 , lowercase__=3 , lowercase__=4 , lowercase__=None , lowercase__=1000 , ):
        """Record the (tiny) model/test hyper-parameters on the tester.

        Upstream parameter order: parent, batch_size, num_channels, image_size,
        patch_size, text_seq_length, is_training, use_input_mask,
        use_token_type_ids, use_labels, vocab_size, hidden_size,
        num_hidden_layers, num_attention_heads, intermediate_size, hidden_act,
        hidden_dropout_prob, attention_probs_dropout_prob,
        max_position_embeddings, type_vocab_size, type_sequence_label_size,
        initializer_range, coordinate_size, shape_size, num_labels,
        num_choices, scope, range_bbox.
        """
        SCREAMING_SNAKE_CASE_ : List[str] = parent
        SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
        SCREAMING_SNAKE_CASE_ : Dict = num_channels
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_size
        SCREAMING_SNAKE_CASE_ : Optional[int] = patch_size
        SCREAMING_SNAKE_CASE_ : str = is_training
        SCREAMING_SNAKE_CASE_ : str = use_input_mask
        SCREAMING_SNAKE_CASE_ : Any = use_token_type_ids
        SCREAMING_SNAKE_CASE_ : int = use_labels
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size
        SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_size
        SCREAMING_SNAKE_CASE_ : Dict = num_hidden_layers
        SCREAMING_SNAKE_CASE_ : Any = num_attention_heads
        SCREAMING_SNAKE_CASE_ : Optional[Any] = intermediate_size
        SCREAMING_SNAKE_CASE_ : str = hidden_act
        SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
        SCREAMING_SNAKE_CASE_ : Dict = type_vocab_size
        SCREAMING_SNAKE_CASE_ : List[Any] = type_sequence_label_size
        SCREAMING_SNAKE_CASE_ : Tuple = initializer_range
        SCREAMING_SNAKE_CASE_ : List[str] = coordinate_size
        SCREAMING_SNAKE_CASE_ : Optional[Any] = shape_size
        SCREAMING_SNAKE_CASE_ : List[str] = num_labels
        SCREAMING_SNAKE_CASE_ : Optional[int] = num_choices
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = scope
        SCREAMING_SNAKE_CASE_ : Dict = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_seq_length
        SCREAMING_SNAKE_CASE_ : Tuple = (image_size // patch_size) ** 2 + 1
        SCREAMING_SNAKE_CASE_ : Optional[int] = self.text_seq_length + self.image_seq_length

    def __lowerCamelCase ( self ):
        """Build a tiny LayoutLMv3 config plus random ids/bboxes/pixels/masks/labels."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        SCREAMING_SNAKE_CASE_ : Dict = bbox.numpy()
        # Ensure that bbox is legal
        # (swap coordinates so x0 <= x1 and y0 <= y1 for every random box).
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 3]
                    SCREAMING_SNAKE_CASE_ : str = bbox[i, j, 1]
                    SCREAMING_SNAKE_CASE_ : Dict = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    SCREAMING_SNAKE_CASE_ : List[Any] = bbox[i, j, 2]
                    SCREAMING_SNAKE_CASE_ : Dict = bbox[i, j, 0]
                    SCREAMING_SNAKE_CASE_ : Tuple = tmp_coordinate
        SCREAMING_SNAKE_CASE_ : Dict = tf.constant(lowercase__ )
        SCREAMING_SNAKE_CASE_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        SCREAMING_SNAKE_CASE_ : Dict = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE_ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
        SCREAMING_SNAKE_CASE_ : List[str] = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        SCREAMING_SNAKE_CASE_ : Dict = None
        SCREAMING_SNAKE_CASE_ : Tuple = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        SCREAMING_SNAKE_CASE_ : str = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Run the base model on text+image, text-only, and image-only inputs and check shapes."""
        SCREAMING_SNAKE_CASE_ : str = TFLayoutLMvaModel(config=lowercase__ )
        # text + image
        SCREAMING_SNAKE_CASE_ : int = model(lowercase__ , pixel_values=lowercase__ , training=lowercase__ )
        SCREAMING_SNAKE_CASE_ : str = model(
            lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , training=lowercase__ , )
        SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , training=lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        SCREAMING_SNAKE_CASE_ : Tuple = model(lowercase__ , training=lowercase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        SCREAMING_SNAKE_CASE_ : int = model({"pixel_values": pixel_values} , training=lowercase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Run the sequence-classification head and check the logits shape."""
        SCREAMING_SNAKE_CASE_ : Any = self.num_labels
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFLayoutLMvaForSequenceClassification(config=lowercase__ )
        SCREAMING_SNAKE_CASE_ : List[str] = model(
            lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , training=lowercase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Run the token-classification head and check the logits shape."""
        SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
        SCREAMING_SNAKE_CASE_ : Any = TFLayoutLMvaForTokenClassification(config=lowercase__ )
        SCREAMING_SNAKE_CASE_ : int = model(
            lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , training=lowercase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Run the question-answering head and check start/end logits shapes."""
        SCREAMING_SNAKE_CASE_ : Tuple = 2
        SCREAMING_SNAKE_CASE_ : List[Any] = TFLayoutLMvaForQuestionAnswering(config=lowercase__ )
        SCREAMING_SNAKE_CASE_ : int = model(
            lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , training=lowercase__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __lowerCamelCase ( self ):
        """Package the prepared config and tensors into the common inputs dict."""
        SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_config_and_inputs()
        ((SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_)) : Any = config_and_inputs
        SCREAMING_SNAKE_CASE_ : Optional[Any] = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase,_UpperCAmelCase,unittest.TestCase ):
    """Common-model test suite for the TF LayoutLMv3 family.

    NOTE(review): this file was machine-obfuscated — the five ``_A`` class
    attributes below clobber one another, and several names (``lowercase__``,
    ``model_class``, ``return_labels``, ...) are read without a visible binding.
    Presumably they were distinct identifiers upstream; confirm against the
    original transformers test module before relying on this code.
    """

    # Model classes under test (empty tuple when TF is unavailable).
    _A = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-name -> model-class mapping (empty dict when TF is unavailable).
    _A = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    _A = False
    _A = False
    _A = False

    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Unconditionally returns True (predicate hook used by the test mixin)."""
        return True

    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__=False ):
        """Deep-copy the inputs dict and, when requested, attach dummy label
        tensors (tf.ones/tf.zeros sized by the tester's batch) for the
        model class being exercised."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = copy.deepcopy(lowercase__ )
        if model_class in get_values(lowercase__ ):
            # Tile every non-scalar tensor along a new num_choices axis.
            SCREAMING_SNAKE_CASE_ : str = {
                k: tf.tile(tf.expand_dims(lowercase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(lowercase__ , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(lowercase__ ):
                SCREAMING_SNAKE_CASE_ : Tuple = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(lowercase__ ):
                # Question answering needs both start and end positions.
                SCREAMING_SNAKE_CASE_ : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                SCREAMING_SNAKE_CASE_ : List[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(lowercase__ ):
                SCREAMING_SNAKE_CASE_ : List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(lowercase__ ):
                # Token classification: one label per text token.
                SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict

    def __lowerCamelCase ( self ):
        """Set up the model tester and the config tester."""
        SCREAMING_SNAKE_CASE_ : List[str] = TFLayoutLMvaModelTester(self )
        SCREAMING_SNAKE_CASE_ : int = ConfigTester(self , config_class=lowercase__ , hidden_size=37 )

    def __lowerCamelCase ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def __lowerCamelCase ( self ):
        """For every model class exposing `hf_compute_loss`, check the loss
        shape when inputs are passed as kwargs, with masked labels, as a
        dict, and as a positional tuple."""
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE_ : int = model_class(lowercase__ )
            if getattr(lowercase__ , "hf_compute_loss" , lowercase__ ):
                # The number of elements in the loss should be the same as the number of elements in the label
                SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(inputs_dict.copy() , lowercase__ , return_labels=lowercase__ )
                SCREAMING_SNAKE_CASE_ : List[Any] = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowercase__ )[0]
                ]
                SCREAMING_SNAKE_CASE_ : Any = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(inputs_dict.copy() , lowercase__ , return_labels=lowercase__ )
                SCREAMING_SNAKE_CASE_ : List[str] = prepared_for_class.pop("input_ids" )
                SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase__ , **lowercase__ )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                SCREAMING_SNAKE_CASE_ : List[Any] = self._prepare_for_class(inputs_dict.copy() , lowercase__ , return_labels=lowercase__ )
                SCREAMING_SNAKE_CASE_ : int = prepared_for_class.pop("input_ids" )
                if "labels" in prepared_for_class:
                    SCREAMING_SNAKE_CASE_ : str = prepared_for_class["labels"].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        # -100 is the conventional ignore-index for HF losses.
                        SCREAMING_SNAKE_CASE_ : str = -100
                        SCREAMING_SNAKE_CASE_ : str = tf.convert_to_tensor(lowercase__ )
                        SCREAMING_SNAKE_CASE_ : Optional[int] = model(lowercase__ , **lowercase__ )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                SCREAMING_SNAKE_CASE_ : List[Any] = self._prepare_for_class(inputs_dict.copy() , lowercase__ , return_labels=lowercase__ )
                SCREAMING_SNAKE_CASE_ : Optional[int] = model(lowercase__ )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                SCREAMING_SNAKE_CASE_ : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowercase__ , return_labels=lowercase__ )
                # Get keys that were added with the _prepare_for_class function
                SCREAMING_SNAKE_CASE_ : int = prepared_for_class.keys() - inputs_dict.keys()
                SCREAMING_SNAKE_CASE_ : Optional[int] = inspect.signature(model.call ).parameters
                SCREAMING_SNAKE_CASE_ : Tuple = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                SCREAMING_SNAKE_CASE_ : List[Any] = {0: "input_ids"}
                for label_key in label_keys:
                    SCREAMING_SNAKE_CASE_ : Optional[int] = signature_names.index(lowercase__ )
                    SCREAMING_SNAKE_CASE_ : List[Any] = label_key
                SCREAMING_SNAKE_CASE_ : List[str] = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                SCREAMING_SNAKE_CASE_ : List[str] = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    SCREAMING_SNAKE_CASE_ : List[str] = prepared_for_class[value]
                SCREAMING_SNAKE_CASE_ : List[Any] = tuple(lowercase__ )
                # Send to model
                SCREAMING_SNAKE_CASE_ : int = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

    def __lowerCamelCase ( self ):
        """Exercise create_and_check_model on a fresh set of tester inputs.

        NOTE(review): the 8-way destructuring repeatedly binds the same
        obfuscated name, so only the last element survives — upstream these
        were eight distinct variables.
        """
        (
            (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ),
        ) : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )

    def __lowerCamelCase ( self ):
        """Check the base model under every position-embedding variant."""
        (
            (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ),
        ) : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            SCREAMING_SNAKE_CASE_ : List[str] = type
            self.model_tester.create_and_check_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )

    def __lowerCamelCase ( self ):
        """Exercise the sequence-classification head."""
        (
            (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ),
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )

    def __lowerCamelCase ( self ):
        """Exercise the token-classification head."""
        (
            (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ),
        ) : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )

    def __lowerCamelCase ( self ):
        """Exercise the question-answering head."""
        (
            (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ),
        ) : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )

    @slow
    def __lowerCamelCase ( self ):
        """Smoke-test from_pretrained on the first published checkpoint."""
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFLayoutLMvaModel.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
def __lowerCamelCase ( ) -> Optional[Any]:
    """Load and return the COCO fixture image used by the integration test."""
    fixture_image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return fixture_image
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test: run microsoft/layoutlmv3-base on a fixture image
    and compare a 3x3 slice of the last hidden state against golden values."""

    @cached_property
    def __lowerCamelCase ( self ):
        """Image processor for the test, or None when vision deps are absent.
        NOTE(review): apply_ocr is set from an obfuscated flag (presumably
        False, so the hard-coded words/boxes below are used) — confirm."""
        return LayoutLMvaImageProcessor(apply_ocr=lowercase__ ) if is_vision_available() else None

    @slow
    def __lowerCamelCase ( self ):
        """Forward a fixture image plus a tiny token/bbox pair through the
        pretrained model and verify output shape and sample values."""
        SCREAMING_SNAKE_CASE_ : List[str] = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
        SCREAMING_SNAKE_CASE_ : Any = self.default_image_processor
        SCREAMING_SNAKE_CASE_ : Optional[int] = prepare_img()
        SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processor(images=lowercase__ , return_tensors="tf" ).pixel_values
        SCREAMING_SNAKE_CASE_ : Dict = tf.constant([[1, 2]] )
        SCREAMING_SNAKE_CASE_ : Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        SCREAMING_SNAKE_CASE_ : List[Any] = model(input_ids=lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , training=lowercase__ )
        # verify the logits
        SCREAMING_SNAKE_CASE_ : Tuple = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , lowercase__ )
        SCREAMING_SNAKE_CASE_ : int = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase__ , atol=1e-4 ) )
| 68 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
snake_case_ = logging.get_logger(__name__)
# Model id -> hosted config URL for the published YOLOS checkpoints.
# NOTE(review): both constants share the obfuscated name `snake_case_`, so the
# second assignment clobbers the first — upstream these were distinct names.
snake_case_ = {
    'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Configuration for YOLOS (ViT-based object detection).

    Fixes over the original block: the ``__init__`` signature repeated the
    same obfuscated parameter name (a SyntaxError), ``super().__init__`` was
    called with the undefined name ``__A``, and the body bound throwaway
    locals instead of instance attributes. Defaults reproduce the values
    visible in the original signature.
    """

    _A = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=None,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        """Build the config; extra kwargs are forwarded to the base config."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Use None as the sentinel so the [512, 864] default list is not a
        # shared mutable default argument.
        self.image_size = [512, 864] if image_size is None else image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """ONNX-export configuration for YOLOS."""

    # Minimum version required for export.
    _A = version.parse("1.11" )

    @property
    def __lowerCamelCase ( self ):
        """ONNX input spec: a single 4-D image tensor with dynamic axes."""
        pixel_axes = {0: "batch", 1: "num_channels", 2: "height", 3: "width"}
        return OrderedDict([("pixel_values", pixel_axes)] )

    @property
    def __lowerCamelCase ( self ):
        """Absolute tolerance used when validating exported model outputs."""
        return 1e-4

    @property
    def __lowerCamelCase ( self ):
        """Default ONNX opset to export with."""
        return 12
| 711 |
'''simple docstring'''
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = [1]
for i in range(2 , SCREAMING_SNAKE_CASE_ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : Dict = list(range(SCREAMING_SNAKE_CASE_ ) )
# Find permutation
while factorials:
SCREAMING_SNAKE_CASE_ : Any = factorials.pop()
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = divmod(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 68 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
# UNet hyper-parameters for the 32px (test-size) consistency model.
# NOTE(review): all six config constants below share the obfuscated name
# `snake_case_`, so each assignment clobbers the previous one; the __main__
# block refers to them by their upstream names (TEST_UNET_CONFIG, ...).
snake_case_ : Any = {
    "sample_size": 3_2,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1_0_0_0,
    "block_out_channels": [3_2, 6_4],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
# UNet hyper-parameters for the 64px class-conditional (ImageNet-64) model.
snake_case_ : Any = {
    "sample_size": 6_4,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1_0_0_0,
    "block_out_channels": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
    "attention_head_dim": 6_4,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
# UNet hyper-parameters for the 256px unconditional (LSUN) model.
snake_case_ : Dict = {
    "sample_size": 2_5_6,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
    "attention_head_dim": 6_4,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
# Scheduler settings for consistency-distillation checkpoints.
snake_case_ : Union[str, Any] = {
    "num_train_timesteps": 4_0,
    "sigma_min": 0.0_0_2,
    "sigma_max": 8_0.0,
}
# Scheduler settings for consistency-training ImageNet-64 checkpoints.
snake_case_ : List[str] = {
    "num_train_timesteps": 2_0_1,
    "sigma_min": 0.0_0_2,
    "sigma_max": 8_0.0,
}
# Scheduler settings for consistency-training LSUN-256 checkpoints.
snake_case_ : int = {
    "num_train_timesteps": 1_5_1,
    "sigma_min": 0.0_0_2,
    "sigma_max": 8_0.0,
}
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[str] ) -> str:
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any]=False ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = checkpoint[F"{old_prefix}.in_layers.0.weight"]
SCREAMING_SNAKE_CASE_ : str = checkpoint[F"{old_prefix}.in_layers.0.bias"]
SCREAMING_SNAKE_CASE_ : Optional[Any] = checkpoint[F"{old_prefix}.in_layers.2.weight"]
SCREAMING_SNAKE_CASE_ : List[Any] = checkpoint[F"{old_prefix}.in_layers.2.bias"]
SCREAMING_SNAKE_CASE_ : Optional[int] = checkpoint[F"{old_prefix}.emb_layers.1.weight"]
SCREAMING_SNAKE_CASE_ : str = checkpoint[F"{old_prefix}.emb_layers.1.bias"]
SCREAMING_SNAKE_CASE_ : str = checkpoint[F"{old_prefix}.out_layers.0.weight"]
SCREAMING_SNAKE_CASE_ : Optional[Any] = checkpoint[F"{old_prefix}.out_layers.0.bias"]
SCREAMING_SNAKE_CASE_ : Tuple = checkpoint[F"{old_prefix}.out_layers.3.weight"]
SCREAMING_SNAKE_CASE_ : Tuple = checkpoint[F"{old_prefix}.out_layers.3.bias"]
if has_skip:
SCREAMING_SNAKE_CASE_ : Tuple = checkpoint[F"{old_prefix}.skip_connection.weight"]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = checkpoint[F"{old_prefix}.skip_connection.bias"]
return new_checkpoint
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=None ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = checkpoint[F"{old_prefix}.qkv.weight"].chunk(3 , dim=0 )
SCREAMING_SNAKE_CASE_ : Dict = checkpoint[F"{old_prefix}.qkv.bias"].chunk(3 , dim=0 )
SCREAMING_SNAKE_CASE_ : Dict = checkpoint[F"{old_prefix}.norm.weight"]
SCREAMING_SNAKE_CASE_ : Any = checkpoint[F"{old_prefix}.norm.bias"]
SCREAMING_SNAKE_CASE_ : str = weight_q.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE_ : Tuple = bias_q.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE_ : Dict = weight_k.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE_ : int = bias_k.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE_ : str = weight_v.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE_ : Dict = bias_v.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
checkpoint[F"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 )
)
SCREAMING_SNAKE_CASE_ : Dict = checkpoint[F"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Any:
    """Convert a full consistency-model UNet checkpoint into the diffusers
    state-dict layout: time embedding, optional class embedding, conv_in,
    down blocks, mid block, up blocks, then conv_norm_out/conv_out.

    NOTE(review): the obfuscation destroyed this function — the two
    parameters share one name (invalid Python), every destination key was
    replaced by a throwaway local, and call arguments became the undefined
    name `__lowerCAmelCase`. The structure below documents the intended flow
    only; restore from the upstream diffusers conversion script before use.
    """
    # Load the original .pt on CPU; accumulate remapped tensors separately.
    SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.load(__lowerCAmelCase , map_location="cpu" )
    SCREAMING_SNAKE_CASE_ : Dict = {}
    # Time embedding MLP (two linear layers).
    SCREAMING_SNAKE_CASE_ : Any = checkpoint["""time_embed.0.weight"""]
    SCREAMING_SNAKE_CASE_ : str = checkpoint["""time_embed.0.bias"""]
    SCREAMING_SNAKE_CASE_ : int = checkpoint["""time_embed.2.weight"""]
    SCREAMING_SNAKE_CASE_ : List[Any] = checkpoint["""time_embed.2.bias"""]
    if unet_config["num_class_embeds"] is not None:
        # Class-conditional models carry a label embedding table.
        SCREAMING_SNAKE_CASE_ : Dict = checkpoint["""label_emb.weight"""]
    # Input convolution.
    SCREAMING_SNAKE_CASE_ : Optional[Any] = checkpoint["""input_blocks.0.0.weight"""]
    SCREAMING_SNAKE_CASE_ : Dict = checkpoint["""input_blocks.0.0.bias"""]
    SCREAMING_SNAKE_CASE_ : Optional[Any] = unet_config["""down_block_types"""]
    SCREAMING_SNAKE_CASE_ : List[str] = unet_config["""layers_per_block"""]
    SCREAMING_SNAKE_CASE_ : Optional[int] = unet_config["""attention_head_dim"""]
    SCREAMING_SNAKE_CASE_ : str = unet_config["""block_out_channels"""]
    # `current_layer` walks the flat input_blocks.N indexing of the source.
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1
    SCREAMING_SNAKE_CASE_ : Any = channels_list[0]
    for i, layer_type in enumerate(__lowerCAmelCase ):
        SCREAMING_SNAKE_CASE_ : Optional[int] = channels_list[i]
        # A channel change means the first resnet of the block has a skip conv.
        SCREAMING_SNAKE_CASE_ : List[Any] = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(__lowerCAmelCase ):
                SCREAMING_SNAKE_CASE_ : Any = F"down_blocks.{i}.resnets.{j}"
                SCREAMING_SNAKE_CASE_ : Tuple = F"input_blocks.{current_layer}.0"
                SCREAMING_SNAKE_CASE_ : str = True if j == 0 and downsample_block_has_skip else False
                SCREAMING_SNAKE_CASE_ : Any = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , has_skip=__lowerCAmelCase )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(__lowerCAmelCase ):
                SCREAMING_SNAKE_CASE_ : Optional[Any] = F"down_blocks.{i}.resnets.{j}"
                SCREAMING_SNAKE_CASE_ : Optional[int] = F"input_blocks.{current_layer}.0"
                SCREAMING_SNAKE_CASE_ : int = True if j == 0 and downsample_block_has_skip else False
                SCREAMING_SNAKE_CASE_ : Optional[int] = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , has_skip=__lowerCAmelCase )
                SCREAMING_SNAKE_CASE_ : Tuple = F"down_blocks.{i}.attentions.{j}"
                SCREAMING_SNAKE_CASE_ : Optional[int] = F"input_blocks.{current_layer}.1"
                SCREAMING_SNAKE_CASE_ : List[str] = convert_attention(
                    __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
                current_layer += 1
        # Every down block except the last ends with a resnet downsampler.
        if i != len(__lowerCAmelCase ) - 1:
            SCREAMING_SNAKE_CASE_ : str = F"down_blocks.{i}.downsamplers.0"
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = F"input_blocks.{current_layer}.0"
            SCREAMING_SNAKE_CASE_ : List[Any] = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            current_layer += 1
        SCREAMING_SNAKE_CASE_ : Tuple = current_channels
    # hardcoded the mid-block for now
    SCREAMING_SNAKE_CASE_ : Optional[Any] = """mid_block.resnets.0"""
    SCREAMING_SNAKE_CASE_ : List[str] = """middle_block.0"""
    SCREAMING_SNAKE_CASE_ : Any = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    SCREAMING_SNAKE_CASE_ : str = """mid_block.attentions.0"""
    SCREAMING_SNAKE_CASE_ : Tuple = """middle_block.1"""
    SCREAMING_SNAKE_CASE_ : Optional[Any] = convert_attention(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    SCREAMING_SNAKE_CASE_ : Any = """mid_block.resnets.1"""
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = """middle_block.2"""
    SCREAMING_SNAKE_CASE_ : Optional[int] = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    # Up path: output_blocks.N indexing restarts from 0.
    SCREAMING_SNAKE_CASE_ : Dict = 0
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = unet_config["""up_block_types"""]
    for i, layer_type in enumerate(__lowerCAmelCase ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                SCREAMING_SNAKE_CASE_ : Optional[Any] = F"up_blocks.{i}.resnets.{j}"
                SCREAMING_SNAKE_CASE_ : str = F"output_blocks.{current_layer}.0"
                SCREAMING_SNAKE_CASE_ : Tuple = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , has_skip=__lowerCAmelCase )
                current_layer += 1
            if i != len(__lowerCAmelCase ) - 1:
                SCREAMING_SNAKE_CASE_ : Any = F"up_blocks.{i}.upsamplers.0"
                SCREAMING_SNAKE_CASE_ : Union[str, Any] = F"output_blocks.{current_layer-1}.1"
                SCREAMING_SNAKE_CASE_ : Any = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                SCREAMING_SNAKE_CASE_ : Dict = F"up_blocks.{i}.resnets.{j}"
                SCREAMING_SNAKE_CASE_ : Dict = F"output_blocks.{current_layer}.0"
                SCREAMING_SNAKE_CASE_ : int = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , has_skip=__lowerCAmelCase )
                SCREAMING_SNAKE_CASE_ : Dict = F"up_blocks.{i}.attentions.{j}"
                SCREAMING_SNAKE_CASE_ : List[str] = F"output_blocks.{current_layer}.1"
                SCREAMING_SNAKE_CASE_ : str = convert_attention(
                    __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
                current_layer += 1
            if i != len(__lowerCAmelCase ) - 1:
                # Attn up blocks place the upsampler at slot .2 (after the attn).
                SCREAMING_SNAKE_CASE_ : List[Any] = F"up_blocks.{i}.upsamplers.0"
                SCREAMING_SNAKE_CASE_ : List[str] = F"output_blocks.{current_layer-1}.2"
                SCREAMING_SNAKE_CASE_ : Optional[int] = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    # Output head: final group norm + conv.
    SCREAMING_SNAKE_CASE_ : Any = checkpoint["""out.0.weight"""]
    SCREAMING_SNAKE_CASE_ : Dict = checkpoint["""out.0.bias"""]
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = checkpoint["""out.2.weight"""]
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = checkpoint["""out.2.bias"""]
    return new_checkpoint
if __name__ == "__main__":
    # CLI entry point: convert an original consistency-model .pt checkpoint
    # into a diffusers ConsistencyModelPipeline saved at --dump_path.
    # NOTE(review): obfuscation renamed every assignment target to
    # `snake_case_` while later lines read the upstream names (`parser`,
    # `args`, `ckpt_name`, ...), so this script is not runnable as-is.
    snake_case_ : List[str] = argparse.ArgumentParser()
    parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
    )
    parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
    snake_case_ : Union[str, Any] = parser.parse_args()
    snake_case_ : int = strabool(args.class_cond)
    snake_case_ : int = os.path.basename(args.unet_path)
    print(F'''Checkpoint: {ckpt_name}''')
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        snake_case_ : Any = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        snake_case_ : int = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        snake_case_ : Optional[int] = TEST_UNET_CONFIG
    else:
        raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
    if not args.class_cond:
        snake_case_ : Any = None
    # Remap the state dict and load it into a fresh diffusers UNet.
    snake_case_ : Optional[Any] = con_pt_to_diffuser(args.unet_path, unet_config)
    snake_case_ : Dict = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        snake_case_ : Optional[int] = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        snake_case_ : int = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        snake_case_ : List[str] = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
    # Assemble the pipeline and write it to disk.
    snake_case_ : List[Any] = CMStochasticIterativeScheduler(**scheduler_config)
    snake_case_ : str = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 712 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # CLI entry point: download/convert an original Stable Diffusion
    # ControlNet checkpoint into diffusers format and save it at --dump_path.
    snake_case_ = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    parser.add_argument(
        '--original_config_file',
        type=str,
        required=True,
        help='The YAML config file corresponding to the original architecture.',
    )
    parser.add_argument(
        '--num_in_channels',
        default=None,
        type=int,
        help='The number of input channels. If `None` number of input channels will be automatically inferred.',
    )
    parser.add_argument(
        '--image_size',
        default=5_1_2,
        type=int,
        help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
            ' Base. Use 768 for Stable Diffusion v2.'
        ),
    )
    parser.add_argument(
        '--extract_ema',
        action='store_true',
        help=(
            'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
            ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
            ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
        ),
    )
    parser.add_argument(
        '--upcast_attention',
        action='store_true',
        help=(
            'Whether the attention computation should always be upcasted. This is necessary when running stable'
            ' diffusion 2.1.'
        ),
    )
    parser.add_argument(
        '--from_safetensors',
        action='store_true',
        help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
    )
    parser.add_argument(
        '--to_safetensors',
        action='store_true',
        help='Whether to store pipeline in safetensors format or not.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')

    def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Dict:
        """Strictly parse "True"/"False" into a bool (argparse `type=` helper).
        NOTE(review): the body reads `string` while the parameter carries an
        obfuscated name, and the usage below refers to `parse_bool` —
        presumably the function's original name."""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(F"could not parse string as bool {string}" )

    parser.add_argument(
        '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
    )
    parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    snake_case_ = parser.parse_args()
    # Delegate the heavy lifting to the diffusers conversion helper, then save.
    snake_case_ = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )
    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 68 | 0 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __lowerCamelCase ( token , num_runs=7 ):
    """Fetch the most recent `num_runs` scheduled runs of the daily-CI
    GitHub Actions workflow via the GitHub REST API.

    Fix over the original block: the two parameters shared one obfuscated
    name (a SyntaxError) and the URL/headers were passed via the undefined
    name `__UpperCAmelCase`.

    Args:
        token: GitHub API token, or None for unauthenticated requests.
        num_runs: number of runs to request (`per_page`).

    Returns:
        The list under the response's "workflow_runs" key.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url , headers=headers ).json()
    return result["workflow_runs"]
def __lowerCamelCase ( token ):
    """Return the id of the most recent *completed* daily-CI workflow run,
    or None when no completed run is found.

    Fix over the original block: the runs were fetched with the undefined
    name `__UpperCAmelCase` instead of the `token` argument, and the loop
    stored the id in a throwaway local instead of the variable returned.
    """
    workflow_runs = get_daily_ci_runs(token )
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def __lowerCamelCase ( artifact_names , output_dir , token ):
    """Download the named artifacts of the latest completed daily-CI run
    into `output_dir`. Artifacts missing from the run are skipped silently.

    Fix over the original block: the three parameters shared one obfuscated
    name (a SyntaxError) and every call used the undefined name
    `__UpperCAmelCase` instead of the locals.
    """
    workflow_run_id = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        # NOTE: `worflow_run_id` (sic) is the keyword the helper exposes.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id , token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name , artifact_url=artifact_url , output_dir=output_dir , token=token )
def __lowerCamelCase ( artifact_names , output_dir , token ):
    """Download the latest daily-CI artifacts and return their text contents
    as {artifact_name: {member_filename: decoded_text}}.

    Fix over the original block: all helper calls used the undefined name
    `__UpperCAmelCase`, and the decoded file contents were assigned to a
    throwaway local instead of the results mapping.
    """
    get_last_daily_ci_artifacts(artifact_names , output_dir , token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir , f"{artifact_name}.zip" )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8" )
    return results
| 713 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
snake_case_ = logging.get_logger(__name__)
# Model id -> hosted config URL for published CamemBERT-style checkpoints.
# NOTE(review): both constants share the obfuscated name `snake_case_`, so
# the second assignment clobbers the first — upstream they were distinct.
snake_case_ = {
    'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
    'umberto-commoncrawl-cased-v1': (
        'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
    ),
    'umberto-wikipedia-uncased-v1': (
        'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
    ),
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Configuration for CamemBERT (RoBERTa-style encoder).

    Fixes over the original block: the ``__init__`` signature repeated one
    obfuscated parameter name (a SyntaxError) and the body bound throwaway
    locals instead of instance attributes. Defaults reproduce the values
    visible in the original signature.
    """

    _A = "camembert"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Build the config; special-token ids and extra kwargs go to the base."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """ONNX-export configuration for CamemBERT."""

    @property
    def __lowerCamelCase ( self ):
        """ONNX input spec: input_ids and attention_mask with dynamic axes.

        Fix over the original block: the axis mapping was assigned to a
        throwaway local while the return statement read the then-undefined
        name `dynamic_axis`.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 68 | 0 |
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def __lowerCamelCase ( ) -> None:
    """Smoke-test ``kruskal`` on a fixed 9-node weighted graph.

    Each edge is a ``[u, v, weight]`` triple; the expected minimum spanning
    tree is compared order-insensitively.  NOTE(review): the original passed
    the undefined name ``lowerCAmelCase__`` to ``kruskal`` and to both
    ``sorted`` calls; the calls were reconstructed as
    ``kruskal(num_nodes, edges)`` — verify against the
    ``graphs.minimum_spanning_tree_kruskal`` API.
    """
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result) == sorted(expected)
| 714 |
'''simple docstring'''
def __lowerCamelCase ( numbers: list[int] ) -> list[int]:
    """Sort *numbers* in place with exchange sort and return the same list.

    For each index ``i`` the element is swapped with every later element
    that is smaller, so position ``i`` holds its final value after pass
    ``i``.  O(n^2) comparisons.  (Fix: the original parameter had been
    renamed while the body still referenced ``numbers`` — the parameter
    name is restored.)
    """
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                # Classic tuple swap brings the smaller value forward.
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers
if __name__ == "__main__":
    # Read a comma-separated list from stdin, sort it, and print the result.
    # NOTE(review): the original bound both inputs to `snake_case_` and then
    # referenced `user_input`/`unsorted`/`exchange_sort`, none of which were
    # defined; the names were reconstructed (`__lowerCamelCase` is the sort
    # function defined above).
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(__lowerCamelCase(unsorted))
| 68 | 0 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __lowerCamelCase ( magnitude: float , angle: float , radian_mode: bool = False ) -> list[float]:
    """Resolve a polar force into its Cartesian ``[x, y]`` components.

    *angle* is interpreted in degrees unless *radian_mode* is True.
    (Fix: the original declared three identically-named parameters — a
    SyntaxError; the names were reconstructed from the body.)
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def __lowerCamelCase ( forces , location , eps: float = 10**-1 ) -> bool:
    """Return True when the net moment of *forces* applied at *location* is ~0.

    *forces* and *location* are (n, 2) arrays of 2-D vectors; each per-point
    moment is the scalar 2-D cross product, and equilibrium means the sum of
    moments is within *eps* of zero.  NOTE(review): the original called
    ``cross`` on undefined names; the argument order ``cross(location,
    forces)`` is assumed — the sign does not affect the absolute-value test.
    """
    moments = cross(location, forces)
    sum_moments = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
    # NOTE(review): `polar_force` and `in_static_equilibrium` are not defined
    # under those names above (both defs were renamed to `__lowerCamelCase`),
    # and every assignment below binds `snake_case_` rather than the
    # `forces`/`location` names the asserts read — this self-test cannot run
    # as written; verify against the original source.
    # Test to check if it works
    snake_case_ = array(
        [
            polar_force(7_1_8.4, 1_8_0 - 3_0),
            polar_force(8_7_9.5_4, 4_5),
            polar_force(1_0_0, -9_0),
        ]
    )
    snake_case_ = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    snake_case_ = array(
        [
            polar_force(3_0 * 9.8_1, 1_5),
            polar_force(2_1_5, 1_8_0 - 4_5),
            polar_force(2_6_4, 9_0 - 3_0),
        ]
    )
    snake_case_ = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    snake_case_ = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
    snake_case_ = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
    assert in_static_equilibrium(forces, location)
    import doctest
    doctest.testmod()
| 715 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
snake_case_ = logging.getLogger()
def __lowerCamelCase ( path: Path , articles: list ) -> None:
    """Write *articles* to *path*, newline-separated (no trailing newline).

    (Fixes: the original declared both parameters with the same name — a
    SyntaxError — and leaked the handle returned by ``.open("w")``;
    ``Path.write_text`` opens and closes the file itself.)
    """
    content = "\n".join(articles)
    Path(path).write_text(content)
# NOTE(review): the four constants below were all renamed to `snake_case_`
# (each assignment overwrites the previous one); the class methods reference
# them as T5_TINY / BART_TINY / MBART_TINY — verify the intended names.
snake_case_ = 'patrickvonplaten/t5-tiny-random'
snake_case_ = 'sshleifer/bart-tiny-random'
snake_case_ = 'sshleifer/tiny-mbart'
snake_case_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """End-to-end tests for the seq2seq evaluation scripts.

    NOTE(review): many locals were renamed to `SCREAMING_SNAKE_CASE_` while
    later lines still read the original names (`input_file_name`,
    `output_file_name`, `score_path`, `task`, `testargs`, ...), all methods
    share the name `__lowerCamelCase` (later defs shadow earlier ones), and
    `self.run_eval_tester` / `_dump_articles` are not defined under those
    names — the tests cannot run as written; verify against the original.
    """
    def __lowerCamelCase ( self , lowercase__ ):
        """Run run_eval_search.py on a one-article temp file for *model* and check the output file exists."""
        SCREAMING_SNAKE_CASE_ : Optional[int] = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
        SCREAMING_SNAKE_CASE_ : List[str] = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        SCREAMING_SNAKE_CASE_ : Dict = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(lowercase__ , lowercase__ )
        SCREAMING_SNAKE_CASE_ : Tuple = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
        SCREAMING_SNAKE_CASE_ : Tuple = "translation_en_to_de" if model == T5_TINY else "summarization"
        SCREAMING_SNAKE_CASE_ : Dict = F"\n  run_eval_search.py\n  {model}\n  {input_file_name}\n  {output_file_name}\n  --score_path {score_path}\n  --task {task}\n  --num_beams 2\n  --length_penalty 2.0\n  ".split()
        with patch.object(lowercase__ , "argv" , lowercase__ ):
            run_generate()
            assert Path(lowercase__ ).exists()
            # os.remove(Path(output_file_name))
    def __lowerCamelCase ( self ):
        """Run the eval tester with the default model."""
        self.run_eval_tester(lowercase__ )
    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def __lowerCamelCase ( self , lowercase__ ):
        """Run the eval tester for each tiny summarization model."""
        self.run_eval_tester(lowercase__ )
    @parameterized.expand([T5_TINY, MBART_TINY] )
    @slow
    def __lowerCamelCase ( self , lowercase__ ):
        """Run run_eval_search.py with a beam/length-penalty search and check the report contents."""
        SCREAMING_SNAKE_CASE_ : Dict = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
        SCREAMING_SNAKE_CASE_ : Optional[Any] = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        SCREAMING_SNAKE_CASE_ : List[Any] = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        SCREAMING_SNAKE_CASE_ : Dict = Path(self.get_auto_remove_tmp_dir() )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = str(tmp_dir / "scores.json" )
        SCREAMING_SNAKE_CASE_ : List[Any] = str(tmp_dir / "val.target" )
        _dump_articles(lowercase__ , text["en"] )
        _dump_articles(lowercase__ , text["de"] )
        SCREAMING_SNAKE_CASE_ : List[Any] = "translation_en_to_de" if model == T5_TINY else "summarization"
        SCREAMING_SNAKE_CASE_ : List[str] = F"\n  run_eval_search.py\n  {model}\n  {str(lowercase__ )}\n  {str(lowercase__ )}\n  --score_path {score_path}\n  --reference_path {reference_path}\n  --task {task}\n  ".split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )
        with patch.object(lowercase__ , "argv" , lowercase__ ):
            with CaptureStdout() as cs:
                run_search()
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = [" num_beams | length_penalty", model, "Best score args"]
        SCREAMING_SNAKE_CASE_ : Optional[Any] = ["Info"]
        if "translation" in task:
            expected_strings.append("bleu" )
        else:
            expected_strings.extend(lowercase__ )
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(lowercase__ ).exists()
        os.remove(Path(lowercase__ ) )
| 68 | 0 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    # NOTE(review): every assignment below binds `snake_case_` while the
    # following lines read `df`, `actual_data`, `look_back`, `train_data`,
    # `model`, etc. — the script cannot run as written; the variable names
    # were lost in a mechanical rename.  The intent (from the code shape):
    # load a univariate series, scale it to [0, 1], build sliding-window
    # train/test sets, and fit a 2-layer LSTM forecaster.
    snake_case_ = pd.read_csv('sample_data.csv', header=None)
    snake_case_ = df.shape[:1][0]
    # If you're using some other dataset input the target column
    snake_case_ = df.iloc[:, 1:2]
    snake_case_ = actual_data.values.reshape(len_data, 1)
    snake_case_ = MinMaxScaler().fit_transform(actual_data)
    snake_case_ = 1_0
    snake_case_ = 5
    snake_case_ = 2_0
    snake_case_ = len_data - periods * look_back
    snake_case_ = actual_data[:division]
    snake_case_ = actual_data[division - look_back :]
    snake_case_ , snake_case_ = [], []
    snake_case_ , snake_case_ = [], []
    # Sliding windows: `look_back` inputs predict the next `forward_days` values.
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    snake_case_ = np.array(train_x)
    snake_case_ = np.array(test_x)
    snake_case_ = np.array([list(i.ravel()) for i in train_y])
    snake_case_ = np.array([list(i.ravel()) for i in test_y])
    snake_case_ = Sequential()
    model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(6_4, input_shape=(1_2_8, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='mean_squared_error', optimizer='adam')
    snake_case_ = model.fit(
        x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4
    )
    snake_case_ = model.predict(x_test)
| 716 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def __lowerCamelCase ( coefficient_matrix: NDArray[floataa] , constant_matrix: NDArray[floataa] , init_val: list[int] , iterations: int , ) -> list[float]:
    """Solve ``Ax = b`` approximately with the Jacobi iteration method.

    Args:
        coefficient_matrix: square (n, n) array ``A``; must be strictly
            diagonally dominant.
        constant_matrix: (n, 1) column vector ``b``.
        init_val: initial guess for the n unknowns.
        iterations: number of Jacobi sweeps to run (must be >= 1).

    Returns:
        The approximate solution after the requested number of sweeps.

    Raises:
        ValueError: on inconsistent dimensions, a non-positive iteration
            count, or a coefficient matrix that is not strictly diagonally
            dominant.

    (Fix: the original unpacked both ``.shape`` tuples into one reused name
    and then referenced the undefined ``rowsa``/``colsa``/``rows``/``cols``;
    the intended variables are restored below.)
    """
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )

    # Augmented matrix [A | b]; validated before iterating.
    table: NDArray[floataa] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]       # diagonal coefficient
                elif col == cols - 1:
                    val = table[row][col]         # constant term b[row]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant ( table: NDArray[floataa] ) -> bool:
    """Raise ValueError unless each row's diagonal entry strictly exceeds the
    sum of the row's other coefficients; return True otherwise.

    *table* is the (n, n+1) augmented matrix; the last column (the constants)
    is excluded from the row sums.  (The def was renamed back from the
    mangled ``__lowerCamelCase`` to match the call in the solver above.)
    """
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows):
        total = 0
        for j in range(0 , cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    import doctest
    doctest.testmod()
| 68 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
snake_case_ = tuple[int, int]  # (mangled) edge type alias: an undirected edge as a vertex pair
class SCREAMING_SNAKE_CASE__ :
    """A weighted, undirected graph for Project Euler 107.

    Vertices are ints; each edge is stored once under the normalized key
    ``(min(u, v), max(u, v))`` mapped to its integer weight.
    (Fixes from the mangled original: the methods declared duplicate
    parameter names — a SyntaxError — referenced the undefined ``Graph``
    name, and both methods were renamed ``__lowerCamelCase`` although the
    bodies call ``add_edge``/``prims_algorithm``; those names are restored.)
    """

    def __init__( self , vertices , edges ):
        """Store *vertices* (set of ints) and *edges* (mapping of vertex
        pairs to weights), normalizing every edge key."""
        self.vertices = vertices
        self.edges = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge( self , edge , weight ):
        """Add both endpoints of *edge* and record its normalized key."""
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm( self ):
        """Return a minimum spanning tree of this graph (Prim's algorithm).

        Starting from the smallest vertex, repeatedly add the lightest edge
        that crosses the cut between the tree built so far and the rest of
        the graph.  Assumes the graph is connected.
        """
        subgraph = SCREAMING_SNAKE_CASE__({min(self.vertices )} , {} )
        while len(subgraph.vertices ) < len(self.vertices ):
            # Sentinel strictly larger than every real edge weight.
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                # XOR: exactly one endpoint is already inside the tree.
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph


def __lowerCamelCase ( filename: str = "p107_network.txt" ) -> int:
    """Project Euler 107: maximum saving from replacing the network in
    *filename* (a CSV adjacency matrix, '-' meaning "no edge") with its
    minimum spanning tree.

    Returns the total edge weight minus the weight of the spanning tree.
    """
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    network_file = os.path.join(script_dir , filename )
    edges = {}

    with open(network_file ) as f:
        data = f.read().strip().split("\n" )

    adjaceny_matrix = [line.split("," ) for line in data]

    # Only the strict lower triangle is read; the matrix is symmetric.
    for edgea in range(1 , len(adjaceny_matrix ) ):
        for edgeb in range(edgea ):
            if adjaceny_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjaceny_matrix[edgea][edgeb] )

    graph = SCREAMING_SNAKE_CASE__(set(range(len(adjaceny_matrix ) ) ) , edges )
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values() )
    optimal_total = sum(subgraph.edges.values() )
    return initial_total - optimal_total
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined above — the Project Euler entry
    # point was renamed to `__lowerCamelCase`; verify before running as a script.
    print(F'''{solution() = }''')
| 717 |
'''simple docstring'''
def __lowerCamelCase ( input_1: int , input_2: int ) -> int:
    """XNOR gate: return 1 when both inputs are equal, else 0.

    (Fixes: the original declared two identically-named parameters — a
    SyntaxError — and compared a value against itself; the truth-table test
    below this function confirms XNOR semantics.)
    """
    return 1 if input_1 == input_2 else 0
def __lowerCamelCase ( ) -> None:
    """Exhaustively check the XNOR truth table.

    NOTE(review): `xnor_gate` is not defined under that name above (the gate
    was renamed to `__lowerCamelCase`, which this very function shadows), so
    these assertions cannot run as written; verify the intended target.
    """
    assert xnor_gate(0 , 0 ) == 1
    assert xnor_gate(0 , 1 ) == 0
    assert xnor_gate(1 , 0 ) == 0
    assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
    # NOTE(review): `xnor_gate` is undefined in this module as written (both
    # defs above are named `__lowerCamelCase`); verify the entry point.
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 68 | 0 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __lowerCamelCase ( repo_id: str , path: str , revision: Optional[str] = None ) -> str:
    """Build the Hub URL of *path* inside the dataset repository *repo_id*.

    huggingface_hub releases older than 0.11.0 did not url-encode the file
    path themselves, so the path is quoted here first for those versions.
    (Parameter names were reconstructed — the original declared all three
    with the same name, a SyntaxError; verify against callers.)
    """
    if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type="dataset" , revision=revision )
| 718 |
'''simple docstring'''
def __lowerCamelCase ( hubble_constant: float , radiation_density: float , matter_density: float , dark_energy: float , redshift: float , ) -> float:
    """Return the Hubble parameter H(z) for an FLRW universe.

    The curvature density is inferred from the other three relative
    densities, then ``H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3 +
    Ok*(1+z)^2 + OL)``.

    Raises:
        ValueError: if any input is negative or any relative density
            exceeds one.

    (Fix: the original declared five identically-named parameters — a
    SyntaxError; the names were reconstructed from the body.)
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError("All input parameters must be positive" )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError("Relative densities cannot be greater than one" )
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble
if __name__ == "__main__":
    import doctest
    # run doctest
    doctest.testmod()
    # demo LCDM approximation
    # NOTE(review): the assignment below was renamed to `snake_case_` while the
    # call reads `matter_density`, and `hubble_parameter` is not defined under
    # that name above (the def is `__lowerCamelCase`) — this demo cannot run
    # as written; verify the intended names.
    snake_case_ = 0.3
    print(
        hubble_parameter(
            hubble_constant=6_8.3,
            radiation_density=1E-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 68 | 0 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowerCamelCase ( ) -> str:
    """Command-line entry point: parse TensorFlow benchmark arguments and run the benchmark.

    On a ValueError from argument parsing, translates deprecated
    ``--no_<flag>`` options into a friendly error suggesting ``--no-<flag>``.
    NOTE(review): every local was renamed to `SCREAMING_SNAKE_CASE_` while
    later lines read `parser`, `benchmark`, `depreciated_args`,
    `arg_error_msg`, etc., and `_A` (the argument-dataclass and the raised
    message) is undefined — this function cannot run as written; verify
    against the original transformers benchmark script.
    """
    SCREAMING_SNAKE_CASE_ : List[str] = HfArgumentParser(_A )
    SCREAMING_SNAKE_CASE_ : Optional[Any] = parser.parse_args_into_dataclasses()[0]
    SCREAMING_SNAKE_CASE_ : Any = TensorFlowBenchmark(args=_A )
    try:
        SCREAMING_SNAKE_CASE_ : int = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        SCREAMING_SNAKE_CASE_ : Tuple = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = " ".join(str(_A ).split(" " )[:-1] )
        SCREAMING_SNAKE_CASE_ : Tuple = ""
        SCREAMING_SNAKE_CASE_ : Optional[int] = eval(str(_A ).split(" " )[-1] )
        SCREAMING_SNAKE_CASE_ : List[str] = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(_A )
        if len(_A ) > 0:
            SCREAMING_SNAKE_CASE_ : Tuple = full_error_msg + begin_error_msg + str(_A )
            raise ValueError(_A )
    benchmark.run()
if __name__ == "__main__":
    # NOTE(review): `main` is undefined here — the entry point above was
    # renamed to `__lowerCamelCase`.
    main()
| 719 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Unit tests for transformers' DisjunctiveConstraint.

    NOTE(review): all test methods share the name `__lowerCamelCase` (later
    defs shadow earlier ones under unittest discovery), and several locals
    were renamed to `SCREAMING_SNAKE_CASE_` while later lines read `dc` and
    `lowercase__` is reused for unrelated arguments — verify against the
    original test module.
    """
    def __lowerCamelCase ( self ):
        """Valid nested-list input is accepted; tensors and lists of tensors raise."""
        SCREAMING_SNAKE_CASE_ : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]]
        SCREAMING_SNAKE_CASE_ : Any = DisjunctiveConstraint(lowercase__ )
        self.assertTrue(isinstance(dc.token_ids , lowercase__ ) )
        with self.assertRaises(lowercase__ ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(lowercase__ ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def __lowerCamelCase ( self ):
        """A branch that is a prefix of another branch must be rejected."""
        SCREAMING_SNAKE_CASE_ : List[Any] = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(lowercase__ ):
            DisjunctiveConstraint(lowercase__ ) # fails here
    def __lowerCamelCase ( self ):
        """Stepping 1, 2, 3 completes the [1, 2, 3] branch."""
        SCREAMING_SNAKE_CASE_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
        SCREAMING_SNAKE_CASE_ : Optional[Any] = DisjunctiveConstraint(lowercase__ )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = dc.update(1 )
        SCREAMING_SNAKE_CASE_ : Optional[int] = stepped is True and completed is False and reset is False
        self.assertTrue(lowercase__ )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Tuple = dc.update(2 )
        SCREAMING_SNAKE_CASE_ : Tuple = stepped is True and completed is False and reset is False
        self.assertTrue(lowercase__ )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Any = dc.update(3 )
        SCREAMING_SNAKE_CASE_ : Tuple = stepped is True and completed is True and reset is False
        self.assertTrue(lowercase__ )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def __lowerCamelCase ( self ):
        """Completion via the longer branch, then reset and complete a shorter one."""
        SCREAMING_SNAKE_CASE_ : List[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        SCREAMING_SNAKE_CASE_ : Dict = DisjunctiveConstraint(lowercase__ )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : List[Any] = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : List[Any] = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Tuple = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : str = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : str = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 68 | 0 |
'''simple docstring'''
def __lowerCamelCase ( density: float , bulk_modulus: float ) -> float:
    """Speed of sound in a fluid: ``sqrt(bulk_modulus / density)`` (SI units).

    Raises:
        ValueError: if density or bulk modulus is not strictly positive.

    (Fix: the original declared both parameters with the same name — a
    SyntaxError; the names were reconstructed from the body.)
    """
    if density <= 0:
        raise ValueError("Impossible fluid density" )
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus" )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    import doctest
    doctest.testmod()
| 720 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()  # make CUDA/cuDNN kernels deterministic for the numeric checks below
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase,_UpperCAmelCase,unittest.TestCase ):
    """Model tests for diffusers' VQModel.

    NOTE(review): the two `_A` class attributes below were both renamed from
    distinct originals (the second assignment overwrites the first), all
    methods share the name `__lowerCamelCase`, and several locals read names
    (`batch_size`, `num_channels`, `image`, `model`, `loading_info`, ...)
    that the mangled assignments no longer bind — verify against the
    original test module.
    """
    _A = VQModel
    _A = "sample"
    @property
    def __lowerCamelCase ( self , lowercase__=(32, 32) ):
        """Build a random float input batch of the given spatial size."""
        SCREAMING_SNAKE_CASE_ : str = 4
        SCREAMING_SNAKE_CASE_ : str = 3
        SCREAMING_SNAKE_CASE_ : str = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase__ )
        return {"sample": image}
    @property
    def __lowerCamelCase ( self ):
        """Input shape (channels, height, width)."""
        return (3, 32, 32)
    @property
    def __lowerCamelCase ( self ):
        """Output shape (channels, height, width)."""
        return (3, 32, 32)
    def __lowerCamelCase ( self ):
        """Return the tiny model config and a matching dummy input."""
        SCREAMING_SNAKE_CASE_ : Dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        SCREAMING_SNAKE_CASE_ : int = self.dummy_input
        return init_dict, inputs_dict
    def __lowerCamelCase ( self ):
        """Intentionally skipped for this model."""
        pass
    def __lowerCamelCase ( self ):
        """Intentionally skipped for this model."""
        pass
    def __lowerCamelCase ( self ):
        """Load the dummy checkpoint from the Hub and run a forward pass."""
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : int = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=lowercase__ )
        self.assertIsNotNone(lowercase__ )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(lowercase__ )
        SCREAMING_SNAKE_CASE_ : Optional[int] = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def __lowerCamelCase ( self ):
        """Regression-check a slice of the output against recorded values."""
        SCREAMING_SNAKE_CASE_ : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" )
        model.to(lowercase__ ).eval()
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        SCREAMING_SNAKE_CASE_ : str = image.to(lowercase__ )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ : int = model(lowercase__ ).sample
        SCREAMING_SNAKE_CASE_ : Any = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
        # fmt: on
        self.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3 ) )
| 68 | 0 |
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( sequence: list , start: int | None = None , end: int | None = None ) -> None:
    """Sort *sequence* in place between *start* and *end* with slowsort.

    Slowsort is a deliberately inefficient multiply-recursive sort: sort both
    halves, move the larger of the two boundary elements to the end, then
    recursively sort everything but the last position.

    (Fixes from the mangled original: the recursive calls passed undefined
    names, the parameters shared one name — a SyntaxError — and the swap
    assigned a tuple to a single temporary instead of exchanging
    ``sequence[end]`` and ``sequence[mid]``.)
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    __lowerCamelCase(sequence , start , mid )
    __lowerCamelCase(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    __lowerCamelCase(sequence , start , end - 1 )
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    from doctest import testmod
    testmod()
| 721 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Tests for transformers' logging verbosity helpers and env overrides.

    NOTE(review): all methods share the name `__lowerCamelCase` (later defs
    shadow earlier ones), and several locals were renamed to
    `SCREAMING_SNAKE_CASE_` while later lines read `logger`, `msg`,
    `level_origin`, `env_level_str`, etc. — verify against the original
    test module.
    """
    def __lowerCamelCase ( self ):
        """set_verbosity_* must be reflected by the root library logger's effective level."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger()
        # the current default level is logging.WARNING
        SCREAMING_SNAKE_CASE_ : Optional[int] = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        # restore to the original level
        logging.set_verbosity(lowercase__ )
    def __lowerCamelCase ( self ):
        """Warnings from a transformers.* logger appear/disappear as verbosity changes."""
        SCREAMING_SNAKE_CASE_ : Dict = logging.get_verbosity()
        SCREAMING_SNAKE_CASE_ : List[Any] = logging.get_logger("transformers.models.bart.tokenization_bart" )
        SCREAMING_SNAKE_CASE_ : List[Any] = "Testing 1, 2, 3"
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(lowercase__ ) as cl:
                logger.warning(lowercase__ )
            self.assertEqual(cl.out , msg + "\n" )
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(lowercase__ ) as cl:
            logger.warning(lowercase__ )
        self.assertEqual(cl.out , "" )
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(lowercase__ ) as cl:
            logger.warning(lowercase__ )
        self.assertEqual(cl.out , msg + "\n" )
        # restore to the original level
        logging.set_verbosity(lowercase__ )
    @mockenv(TRANSFORMERS_VERBOSITY="error" )
    def __lowerCamelCase ( self ):
        """TRANSFORMERS_VERBOSITY env var must set the internal verbosity."""
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        SCREAMING_SNAKE_CASE_ : Tuple = logging.get_logger("transformers.models.bart.tokenization_bart" )
        SCREAMING_SNAKE_CASE_ : int = os.getenv("TRANSFORMERS_VERBOSITY" , lowercase__ )
        SCREAMING_SNAKE_CASE_ : Dict = logging.log_levels[env_level_str]
        SCREAMING_SNAKE_CASE_ : str = logging.get_verbosity()
        self.assertEqual(
            lowercase__ , lowercase__ , F"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}" , )
        # restore to the original level
        SCREAMING_SNAKE_CASE_ : Optional[int] = ""
        transformers.utils.logging._reset_library_root_logger()
    @mockenv(TRANSFORMERS_VERBOSITY="super-error" )
    def __lowerCamelCase ( self ):
        """An invalid TRANSFORMERS_VERBOSITY value must be reported, not crash."""
        transformers.utils.logging._reset_library_root_logger()
        SCREAMING_SNAKE_CASE_ : List[Any] = logging.logging.getLogger()
        with CaptureLogger(lowercase__ ) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart" )
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
        # no need to restore as nothing was changed
    def __lowerCamelCase ( self ):
        """TRANSFORMERS_NO_ADVISORY_WARNINGS must silence logger.warning_advice."""
        transformers.utils.logging._reset_library_root_logger()
        SCREAMING_SNAKE_CASE_ : str = logging.get_logger("transformers.models.bart.tokenization_bart" )
        SCREAMING_SNAKE_CASE_ : List[Any] = "Testing 1, 2, 3"
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(lowercase__ ) as cl:
                logger.warning_advice(lowercase__ )
            self.assertEqual(cl.out , "" )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(lowercase__ ) as cl:
                logger.warning_advice(lowercase__ )
            self.assertEqual(cl.out , msg + "\n" )
def __lowerCamelCase ( ) -> Optional[int]:
    """Toggle the huggingface_hub progress bars off and on, asserting the
    global disabled flag tracks each change."""
    disable_progress_bar()
    assert are_progress_bars_disabled()
    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 68 | 0 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 4_0_0 * 2**2_0, 6_0_0 * 2**2_0] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_0_0 * 2**2_0, 9_0_0 * 2**2_0] )
def __lowerCamelCase ( dataset_size , input_in_memory_max_size , monkeypatch ) -> None:
    """Check ``is_small_dataset`` against ``datasets.config.IN_MEMORY_MAX_SIZE``.

    A dataset is "small" only when both a size and a positive in-memory limit
    exist and the size is strictly below the limit.  (Fix: the original
    declared all three parameters with the same name — a SyntaxError — and
    passed the undefined ``snake_case__``; names reconstructed from the
    parametrize ids and the body.)
    """
    if input_in_memory_max_size != "default":
        # Override the configured limit for this parametrized case.
        monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
| 700 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def __lowerCamelCase ( ) -> Any:
    """Build a tiny model/optimizer/scheduler/dataloader bundle for the tests.

    Returns ``(model, optimizer, scheduler, train_dataloader,
    valid_dataloader)``.  (Fix: the original bound every value to the same
    local name and then referenced the undefined `model`/`optimizer`;
    proper locals are restored.)
    """
    model = torch.nn.Linear(2 , 4 )
    optimizer = torch.optim.AdamW(model.parameters() , lr=1.0 )
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
    return model, optimizer, scheduler, train_dl, valid_dl
def __lowerCamelCase ( model ) -> float:
    """Scalar fingerprint of *model*: sum of absolute weights plus absolute biases.

    (Fix: the original's parameter had been renamed while the body still
    read `model`; the parameter name is restored.  The return annotation is
    a plain float, the result of ``Tensor.item()``.)
    """
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def __lowerCamelCase ( model ) -> None:
    """Overwrite *model*'s parameters in place with those of a freshly
    initialized ``Linear`` of the same shape (i.e. randomize the weights).

    (Fix: the original's parameter had been renamed while the body still
    read `model`, and the freshly built state dict was bound to a local the
    load call could not see; both references are restored.)
    """
    random_state = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
    model.load_state_dict(random_state )
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Unit tests for the Accelerator front end: device selection, shared
    GradientState, component registration, state save/load (with and without
    hooks), `prepare` pass-through, and bitsandbytes 8-bit integration.

    NOTE(review): identifier obfuscation has damaged this class. Locals are
    all assigned to the single name `SCREAMING_SNAKE_CASE_`, while later lines
    read names that are never defined (`lowercase__`, `state`, `accelerator`,
    `model_signature`, ...), and nested defs repeat one parameter name — a
    SyntaxError. The original variable names must be restored before these
    tests can run; the documentation below describes the evident intent.
    """
    @require_cuda
    def __lowerCamelCase ( self ):
        """Accelerator defaults to CUDA; a second construction with a cpu flag
        inside assertRaises is expected to fail (exception type obfuscated —
        TODO confirm against the accelerate source)."""
        SCREAMING_SNAKE_CASE_ : Tuple = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(lowercase__ ):
            SCREAMING_SNAKE_CASE_ : List[Any] = Accelerator(cpu=lowercase__ )
    def __lowerCamelCase ( self ):
        """GradientState behaves as a shared singleton: `num_steps` and
        `sync_gradients` mutations are observable and `_reset_state` clears it."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = Accelerator()
        SCREAMING_SNAKE_CASE_ : Any = GradientState()
        assert state.num_steps == 1
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4
        assert state.num_steps == 4
        assert state.sync_gradients is True
        SCREAMING_SNAKE_CASE_ : Optional[int] = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def __lowerCamelCase ( self ):
        """`prepare` must register each prepared object in the accelerator's
        internal registries (_models/_optimizers/_schedulers/_dataloaders)."""
        SCREAMING_SNAKE_CASE_ : Dict = Accelerator()
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = create_components()
        (
            (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ), (
                SCREAMING_SNAKE_CASE_
            ),
        ) : Optional[Any] = accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
        self.assertTrue(prepared_model in accelerator._models )
        self.assertTrue(prepared_optimizer in accelerator._optimizers )
        self.assertTrue(prepared_scheduler in accelerator._schedulers )
        self.assertTrue(prepared_train_dl in accelerator._dataloaders )
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
    def __lowerCamelCase ( self ):
        """`free_memory` empties every internal registry."""
        SCREAMING_SNAKE_CASE_ : Tuple = Accelerator()
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : str = create_components()
        accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models ) == 0 )
        self.assertTrue(len(accelerator._optimizers ) == 0 )
        self.assertTrue(len(accelerator._schedulers ) == 0 )
        self.assertTrue(len(accelerator._dataloaders ) == 0 )
    def __lowerCamelCase ( self ):
        """The ACCELERATE_TORCH_DEVICE env var selects the device, even for a
        device index that does not physically exist (set_device is stubbed)."""
        PartialState._reset_state()
        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*lowercase__ , **lowercase__ ):
            pass
        with patch("torch.cuda.set_device" , lowercase__ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
            SCREAMING_SNAKE_CASE_ : List[str] = Accelerator()
            self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
    def __lowerCamelCase ( self ):
        """save_state/load_state round-trip: randomized weights diverge from the
        saved signature, loading restores it (tolerance 1e-3)."""
        SCREAMING_SNAKE_CASE_ : Optional[Any] = Accelerator()
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = create_components()
        accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
        SCREAMING_SNAKE_CASE_ : Optional[int] = get_signature(lowercase__ )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(lowercase__ )
            # make sure random weights don't match
            load_random_weights(lowercase__ )
            self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) > 1e-3 )
            # make sure loaded weights match
            accelerator.load_state(lowercase__ )
            self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) < 1e-3 )
    def __lowerCamelCase ( self ):
        """Registered save/load pre-hooks run during save_state/load_state (a
        side-channel JSON config restores `class_name`); after `remove()` the
        hooks no longer fire."""
        SCREAMING_SNAKE_CASE_ : Dict = Accelerator()
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = create_components()
        accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_signature(lowercase__ )
        # saving hook
        def save_config(lowercase__ , lowercase__ , lowercase__ ):
            SCREAMING_SNAKE_CASE_ : Optional[Any] = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(lowercase__ , "data.json" ) , "w" ) as f:
                json.dump(lowercase__ , lowercase__ )
        # loading hook
        def load_config(lowercase__ , lowercase__ ):
            with open(os.path.join(lowercase__ , "data.json" ) , "r" ) as f:
                SCREAMING_SNAKE_CASE_ : Any = json.load(lowercase__ )
            SCREAMING_SNAKE_CASE_ : List[str] = config["class_name"]
        SCREAMING_SNAKE_CASE_ : Dict = accelerator.register_save_state_pre_hook(lowercase__ )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = accelerator.register_load_state_pre_hook(lowercase__ )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(lowercase__ )
            # make sure random weights don't match with hooks
            load_random_weights(lowercase__ )
            self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) > 1e-3 )
            # random class name to verify correct one is loaded
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = "random"
            # make sure loaded weights match with hooks
            accelerator.load_state(lowercase__ )
            self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) < 1e-3 )
            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__ )
        # remove hooks
        save_hook.remove()
        load_hook.remove()
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(lowercase__ )
            # make sure random weights don't match with hooks removed
            load_random_weights(lowercase__ )
            self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) > 1e-3 )
            # random class name to verify correct one is loaded
            SCREAMING_SNAKE_CASE_ : Tuple = "random"
            # make sure loaded weights match with hooks removed
            accelerator.load_state(lowercase__ )
            self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) < 1e-3 )
            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__ )
    def __lowerCamelCase ( self ):
        """`prepare` passes a None argument through unchanged."""
        SCREAMING_SNAKE_CASE_ : Optional[int] = Accelerator()
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Tuple = create_components()
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
        # This should work
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = accelerator.prepare(
            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
        self.assertTrue(dummy_obj is None )
    def __lowerCamelCase ( self ):
        """`prepare` tags every prepared object with `_is_accelerate_prepared`."""
        SCREAMING_SNAKE_CASE_ : List[Any] = Accelerator()
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = create_components()
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1, 2, 3]
        # This should work
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(
            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
        self.assertEqual(
            getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
        self.assertEqual(
            getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
    @slow
    @require_bnb
    def __lowerCamelCase ( self ):
        """An 8-bit (bitsandbytes) model on a single device can be prepared."""
        from transformers import AutoModelForCausalLM
        SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_abit=lowercase__ , device_map={"": 0} , )
        SCREAMING_SNAKE_CASE_ : Optional[int] = Accelerator()
        # This should work
        SCREAMING_SNAKE_CASE_ : List[Any] = accelerator.prepare(lowercase__ )
    @slow
    @require_bnb
    def __lowerCamelCase ( self ):
        """An 8-bit model with CPU offload in its device map must be rejected
        by `prepare` (wrapped in assertRaises)."""
        from transformers import AutoModelForCausalLM
        SCREAMING_SNAKE_CASE_ : Optional[Any] = Accelerator()
        with init_empty_weights():
            SCREAMING_SNAKE_CASE_ : Tuple = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        model.tie_weights()
        SCREAMING_SNAKE_CASE_ : Optional[Any] = infer_auto_device_map(lowercase__ )
        SCREAMING_SNAKE_CASE_ : List[Any] = "cpu"
        SCREAMING_SNAKE_CASE_ : Tuple = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , device_map=lowercase__ , load_in_abit=lowercase__ , llm_inta_enable_fpaa_cpu_offload=lowercase__ )
        # This should not work and get value error
        with self.assertRaises(lowercase__ ):
            SCREAMING_SNAKE_CASE_ : str = accelerator.prepare(lowercase__ )
    @slow
    @require_bnb
    @require_multi_gpu
    def __lowerCamelCase ( self ):
        """An 8-bit model spread over multiple GPUs must be rejected by
        `prepare` under MULTI_GPU distribution (wrapped in assertRaises)."""
        from transformers import AutoModelForCausalLM
        SCREAMING_SNAKE_CASE_ : str = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        model.tie_weights()
        SCREAMING_SNAKE_CASE_ : str = infer_auto_device_map(lowercase__ )
        SCREAMING_SNAKE_CASE_ : Dict = 1
        SCREAMING_SNAKE_CASE_ : str = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_abit=lowercase__ , device_map=lowercase__ , )
        SCREAMING_SNAKE_CASE_ : Any = Accelerator()
        # This should not work and get value error
        with self.assertRaises(lowercase__ ):
            SCREAMING_SNAKE_CASE_ : Tuple = accelerator.prepare(lowercase__ )
        PartialState._reset_state()
    @slow
    @require_bnb
    @require_multi_gpu
    def __lowerCamelCase ( self ):
        """An 8-bit model with an explicit multi-GPU device map (no tied
        weights) prepares successfully."""
        from transformers import AutoModelForCausalLM
        with init_empty_weights():
            SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = infer_auto_device_map(lowercase__ )
        SCREAMING_SNAKE_CASE_ : List[str] = 1
        SCREAMING_SNAKE_CASE_ : str = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_abit=lowercase__ , device_map=lowercase__ , )
        SCREAMING_SNAKE_CASE_ : Any = Accelerator()
        # This should work
        SCREAMING_SNAKE_CASE_ : Optional[int] = accelerator.prepare(lowercase__ )
    @require_cuda
    def __lowerCamelCase ( self ):
        """A plain model/optimizer pair can be prepared on CUDA."""
        SCREAMING_SNAKE_CASE_ : Tuple = torch.nn.Linear(10 , 10 )
        SCREAMING_SNAKE_CASE_ : Optional[int] = torch.optim.SGD(model.parameters() , lr=0.01 )
        SCREAMING_SNAKE_CASE_ : Tuple = Accelerator(cpu=lowercase__ )
        SCREAMING_SNAKE_CASE_ : Dict = accelerator.prepare(lowercase__ )
| 68 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two equal-length vectors.

    Fixes: the original declared the same obfuscated name for both parameters
    (a SyntaxError) and the def name no longer matched `euclidean`, which
    `similarity_search` below calls.
    """
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For each query vector in *value_array*, find its nearest neighbor in *dataset*.

    Performs a linear scan with Euclidean distance and returns one
    ``[nearest_vector_as_list, distance]`` pair per query vector.

    Raises:
        ValueError: if the arrays' dimensionality or row widths disagree.
        TypeError: if the shapes are malformed or the dtypes differ.

    Fixes: the original's parameters were obfuscated to duplicate names and the
    body referenced the undefined `__SCREAMING_SNAKE_CASE`; coherent names are
    restored without changing the algorithm or the error messages.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        # 1-D inputs have no second shape axis.
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        # Start with the first dataset row as the provisional nearest neighbor.
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            # Strict comparison keeps the earliest row on ties.
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity (dot product over norms) of two vectors.

    Fixes: the original declared the same obfuscated name for both parameters
    (a SyntaxError) and its def name collided with the other module-level
    functions; restored to a distinct, descriptive name.
    """
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
    # Run any doctests embedded in this module's docstrings when executed
    # directly as a script.
    import doctest

    doctest.testmod()
| 701 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger.
# NOTE(review): obfuscation renamed both module constants to `snake_case_`,
# so the checkpoint map below shadows the logger — the original names
# (likely `logger` and a *_PRETRAINED_CONFIG_ARCHIVE_MAP constant) need
# restoring before this module is usable.
snake_case_ = logging.get_logger(__name__)

# Released X-MOD checkpoints mapped to their hosted config.json files.
snake_case_ = {
    'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
    'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
    'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
    'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
    'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
    'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
    'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
    'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
    'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Configuration for X-MOD models.

    Stores the standard transformer hyper-parameters plus the X-MOD-specific
    adapter settings (reduction factor, adapter layer-norm behaviour, the
    list of supported languages and the default language).

    Fixes: obfuscation had collapsed every `__init__` parameter to the single
    name `lowercase__` — duplicate parameter names are a SyntaxError — so the
    canonical X-MOD parameter names are restored, preserving every default
    value and the original assignment order.
    """

    _A = "xmod"

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ):
        """Record every hyper-parameter on the instance; the special token ids
        and any extra kwargs are forwarded to the parent configuration."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        # Normalize to a list so tuples and other iterables are accepted.
        self.languages = list(languages)
        self.default_language = default_language
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    @property
    def __lowerCamelCase ( self ):
        """Dynamic-axis names for the ONNX export inputs.

        Multiple-choice tasks carry an extra `choice` axis between batch and
        sequence; every other task uses plain (batch, sequence) axes. Both
        `input_ids` and `attention_mask` share the same axis mapping.
        """
        if self.task == "multiple-choice":
            axis_names = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            axis_names = {0: "batch", 1: "sequence"}
        return OrderedDict([("input_ids", axis_names), ("attention_mask", axis_names)])
| 68 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
# Module logger (obfuscated name; note the same name is re-assigned to the
# parsed CLI args in the __main__ guard at the bottom of the file).
snake_case_ = logging.getLogger(__name__)
def parse_args():
    """Parse the command-line arguments for TFRecord shard preparation.

    Returns:
        argparse.Namespace with dataset/tokenizer identifiers, shard size,
        split, optional limit, max sequence length, and the output directory.

    Fixes: obfuscation had replaced every `type=` (and `--limit`'s default)
    with an undefined name; `str`/`int`/`None` are restored. The def is also
    renamed back to `parse_args`, which the __main__ guard calls.
    """
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.")
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset.")
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    """Return a closure that maps a batch's "text" field through *tokenizer*.

    Suitable for `datasets.Dataset.map(batched=True)`.

    Fixes: the original's parameter name was obfuscated away, so the inner
    function read an undefined `tokenizer`; the def is also renamed back to
    `tokenize_function`, which `main` calls.
    """
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    """Serialize each tokenized row into a `tf.train.Example` byte string.

    Args:
        tokenized_data: mapping with parallel "input_ids" and
            "attention_mask" lists of integer sequences.

    Returns:
        List of serialized `tf.train.Example` protos, one per row.

    Fixes: obfuscation had mangled the TensorFlow API names to
    `intaa_list`/`tf.train.IntaaList` (which do not exist); the real
    `int64_list`/`tf.train.Int64List` names are restored, along with
    coherent local names.
    """
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    """Tokenize, chunk, and write the requested dataset split as TFRecord shards.

    Fixes: obfuscation collapsed every local to one name while later lines
    read `shard_count`, `total_records`, etc. that were never assigned
    (NameError); coherent names are restored throughout, and boolean
    keyword values destroyed by the obfuscation (`batched=True`) are
    reinstated. The def is renamed back to `main`, which the guard calls.
    """
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)
    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)
    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)
        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))
        shard_count += 1
        total_records += records_containing
    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    # Parse CLI options and run the shard-preparation pipeline.
    # Fixes: the parse result was obfuscated into `snake_case_` while the next
    # line read an undefined `args`.
    args = parse_args()
    main(args)
| 702 |
'''simple docstring'''
def or_gate(input_1: int, input_2: int) -> int:
    """Simulate a two-input OR gate: return 1 if either input is 1, else 0.

    Fixes: the original declared the same obfuscated name for both parameters
    (a SyntaxError) and its body read undefined names; the def is renamed back
    to `or_gate`, which the sibling test function and the __main__ guard call.
    """
    return int((input_1, input_2).count(1) != 0)
def test_or_gate() -> None:
    """Exhaustively verify the OR-gate truth table.

    Fixes: obfuscation gave this function the same name as `or_gate`'s def,
    so the second definition shadowed the first at module level; a distinct
    descriptive name is restored.
    """
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
    # Print the full OR truth table when run as a script.
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 68 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
    """Builds miniature DistilBERT configs/inputs and runs output-shape checks
    for every task head (base model, MLM, QA, sequence/token classification,
    multiple choice).

    NOTE(review): identifier obfuscation has damaged this class — `__init__`
    and the `create_and_check_*` methods declare the same parameter name
    (`lowercase__`) several times, which is a SyntaxError, and method bodies
    read names such as `__A` and `config_and_inputs` that are never defined.
    The original parameter/variable names must be restored before this runs.
    """
    def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=99 , lowercase__=32 , lowercase__=5 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=16 , lowercase__=2 , lowercase__=0.02 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
        """Record miniature test hyper-parameters: batch 13, seq length 7,
        vocab 99, hidden 32, 5 layers, 4 heads, intermediate 37, etc."""
        SCREAMING_SNAKE_CASE_ : Tuple = parent
        SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_size
        SCREAMING_SNAKE_CASE_ : Optional[Any] = seq_length
        SCREAMING_SNAKE_CASE_ : str = is_training
        SCREAMING_SNAKE_CASE_ : Any = use_input_mask
        SCREAMING_SNAKE_CASE_ : Tuple = use_token_type_ids
        SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
        SCREAMING_SNAKE_CASE_ : Optional[int] = vocab_size
        SCREAMING_SNAKE_CASE_ : Any = hidden_size
        SCREAMING_SNAKE_CASE_ : List[str] = num_hidden_layers
        SCREAMING_SNAKE_CASE_ : Optional[Any] = num_attention_heads
        SCREAMING_SNAKE_CASE_ : Dict = intermediate_size
        SCREAMING_SNAKE_CASE_ : Dict = hidden_act
        SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_ : int = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_ : Optional[int] = max_position_embeddings
        SCREAMING_SNAKE_CASE_ : int = type_vocab_size
        SCREAMING_SNAKE_CASE_ : int = type_sequence_label_size
        SCREAMING_SNAKE_CASE_ : Dict = initializer_range
        SCREAMING_SNAKE_CASE_ : List[Any] = num_labels
        SCREAMING_SNAKE_CASE_ : str = num_choices
        SCREAMING_SNAKE_CASE_ : Tuple = scope
    def __lowerCamelCase ( self ):
        """Create random input ids, an optional attention mask, optional label
        tensors, and a config for the miniature model."""
        SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE_ : Dict = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
        SCREAMING_SNAKE_CASE_ : Tuple = None
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
        SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __lowerCamelCase ( self ):
        """Return a DistilBertConfig wired to the miniature sizes above."""
        return DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Run the bare DistilBertModel and check the hidden-state shape."""
        SCREAMING_SNAKE_CASE_ : List[Any] = DistilBertModel(config=__A )
        model.to(__A )
        model.eval()
        SCREAMING_SNAKE_CASE_ : str = model(__A , __A )
        SCREAMING_SNAKE_CASE_ : Tuple = model(__A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Run DistilBertForMaskedLM and check the vocab-sized logit shape."""
        SCREAMING_SNAKE_CASE_ : str = DistilBertForMaskedLM(config=__A )
        model.to(__A )
        model.eval()
        SCREAMING_SNAKE_CASE_ : List[Any] = model(__A , attention_mask=__A , labels=__A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Run DistilBertForQuestionAnswering and check start/end logit shapes."""
        SCREAMING_SNAKE_CASE_ : str = DistilBertForQuestionAnswering(config=__A )
        model.to(__A )
        model.eval()
        SCREAMING_SNAKE_CASE_ : Dict = model(
            __A , attention_mask=__A , start_positions=__A , end_positions=__A )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Run DistilBertForSequenceClassification and check the logit shape."""
        SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
        SCREAMING_SNAKE_CASE_ : Dict = DistilBertForSequenceClassification(__A )
        model.to(__A )
        model.eval()
        SCREAMING_SNAKE_CASE_ : List[str] = model(__A , attention_mask=__A , labels=__A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Run DistilBertForTokenClassification and check the per-token logits."""
        SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
        SCREAMING_SNAKE_CASE_ : Tuple = DistilBertForTokenClassification(config=__A )
        model.to(__A )
        model.eval()
        SCREAMING_SNAKE_CASE_ : Tuple = model(__A , attention_mask=__A , labels=__A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Run DistilBertForMultipleChoice on expanded inputs and check logits."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.num_choices
        SCREAMING_SNAKE_CASE_ : List[Any] = DistilBertForMultipleChoice(config=__A )
        model.to(__A )
        model.eval()
        SCREAMING_SNAKE_CASE_ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        SCREAMING_SNAKE_CASE_ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        SCREAMING_SNAKE_CASE_ : List[Any] = model(
            __A , attention_mask=__A , labels=__A , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def __lowerCamelCase ( self ):
        """Repackage prepare_config_and_inputs() into (config, inputs_dict)
        for the common ModelTesterMixin machinery."""
        SCREAMING_SNAKE_CASE_ : List[str] = self.prepare_config_and_inputs()
        (SCREAMING_SNAKE_CASE_) : Optional[Any] = config_and_inputs
        SCREAMING_SNAKE_CASE_ : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_,UpperCamelCase_,unittest.TestCase ):
    """Standard ModelTesterMixin/PipelineTesterMixin harness for DistilBERT:
    config tests, per-head shape tests, slow from_pretrained smoke test, and
    a TorchScript trace/save/load round-trip on GPU.

    NOTE(review): method bodies read obfuscated names such as `__A` that are
    never defined in scope; the original variable names must be restored
    before these tests can run.
    """
    _A = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    # Pipeline-task to model-class mapping used by PipelineTesterMixin.
    _A = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by the common test mixins (names obfuscated;
    # likely fx_compatible / test_pruning / test_resize_embeddings etc. —
    # TODO confirm against the original test module).
    _A = True
    _A = True
    _A = True
    _A = True
    def __lowerCamelCase ( self ):
        """Wire up the model tester and a ConfigTester with embedding dim 37."""
        SCREAMING_SNAKE_CASE_ : List[Any] = DistilBertModelTester(self )
        SCREAMING_SNAKE_CASE_ : str = ConfigTester(self , config_class=__A , dim=37 )
    def __lowerCamelCase ( self ):
        """Run the common configuration sanity tests."""
        self.config_tester.run_common_tests()
    def __lowerCamelCase ( self ):
        """Shape-check the bare DistilBertModel."""
        SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*__A )
    def __lowerCamelCase ( self ):
        """Shape-check DistilBertForMaskedLM."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*__A )
    def __lowerCamelCase ( self ):
        """Shape-check DistilBertForQuestionAnswering."""
        SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*__A )
    def __lowerCamelCase ( self ):
        """Shape-check DistilBertForSequenceClassification."""
        SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*__A )
    def __lowerCamelCase ( self ):
        """Shape-check DistilBertForTokenClassification."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*__A )
    def __lowerCamelCase ( self ):
        """Shape-check DistilBertForMultipleChoice."""
        SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*__A )
    @slow
    def __lowerCamelCase ( self ):
        """Smoke-test loading the first published DistilBERT checkpoint."""
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE_ : List[str] = DistilBertModel.from_pretrained(__A )
            self.assertIsNotNone(__A )
    @slow
    @require_torch_gpu
    def __lowerCamelCase ( self ):
        """TorchScript round-trip on GPU: trace each model class on CPU inputs,
        save, reload onto the device, and call it."""
        SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            SCREAMING_SNAKE_CASE_ : Any = True
            SCREAMING_SNAKE_CASE_ : int = model_class(config=__A )
            SCREAMING_SNAKE_CASE_ : Tuple = self._prepare_for_class(__A , __A )
            SCREAMING_SNAKE_CASE_ : Any = torch.jit.trace(
                __A , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(__A , os.path.join(__A , "traced_model.pt" ) )
                SCREAMING_SNAKE_CASE_ : int = torch.jit.load(os.path.join(__A , "traced_model.pt" ) , map_location=__A )
                loaded(inputs_dict["input_ids"].to(__A ) , inputs_dict["attention_mask"].to(__A ) )
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    """Integration test: pin the hidden states of the pretrained base checkpoint."""

    @slow
    def __lowerCamelCase(self):
        # Fix: the forward call passed an undefined name `__A` instead of the
        # locally built input tensors.
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]

        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)

        # Reference values recorded from the released checkpoint.
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 703 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
# Fix: both module constants were bound to the same mangled name, so the
# archive map clobbered the logger and `logger.info(...)` inside the config
# class referenced an undefined name.
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    """Configuration for DPT (Dense Prediction Transformer) models.

    Holds the ViT-backbone hyper-parameters, the neck/fusion settings and,
    in hybrid mode, a nested BiT backbone configuration.

    Fixes over the mangled original: every parameter was named `lowercase__`
    (a SyntaxError), all `self.` attribute assignments were lost, and the
    required class attribute `model_type` was named `_A`.
    """

    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                # No backbone given: fall back to the default BiT backbone.
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    F"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}." )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            # Fix: the expanded backbone dict was dropped into a local instead of
            # being written back into `output`.
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 68 | 0 |
'''simple docstring'''
def naive_cut_rod_recursive(n: int, prices: list) -> int:
    """Exponential-time top-down rod cutting without memoization.

    Fix: the original defs all collided on one mangled name while calling each
    other (and `_enforce_args`) by their real names, and `return max_revue`
    referenced an undefined variable.
    """
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices) )
    return max_revue


def top_down_cut_rod(n: int, prices: list) -> int:
    """Memoized top-down rod cutting; O(n^2) time, O(n) extra space."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list) -> int:
    """Recursive helper for `top_down_cut_rod`; `max_rev` caches sub-results."""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev), )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list) -> int:
    """Iterative bottom-up rod cutting; O(n^2) time."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list) -> None:
    """Raise ValueError when n is negative or exceeds the price table length."""
    if n < 0:
        raise ValueError(F"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            F"Got n = {n} but length of prices = {len(prices)}" )


def main():
    """Sanity-check that all three implementations agree."""
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
| 704 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Builds image-processor kwargs and synthetic image batches for the tests below.

    Fixes: every __init__/prepare_inputs parameter was named `lowercase__`
    (a SyntaxError), attributes referenced undefined names, `np.uinta` should be
    `np.uint8`, and the class/method names the sibling test classes call
    (`ChineseCLIPImageProcessingTester`, `prepare_image_processor_dict`,
    `prepare_inputs`) were mangled away.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random channels-first images (PIL, numpy or torch)."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8 ) )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ChineseCLIPImageProcessor with 3-channel inputs.

    Fixes: the base class referenced an undefined `_UpperCAmelCase` (the file
    imports `ImageProcessingSavingTestMixin`), the tester was stored in a local
    instead of `self.image_processor_tester`, the `image_processor_dict`
    property name was mangled away, and several call arguments were the
    undefined name `lowercase__`.
    """

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests that 4-channel inputs are converted to 3-channel RGB output.

    Fixes: undefined `_UpperCAmelCase` base class, tester/expected-channel
    values stored in locals instead of attributes, and undefined `lowercase__`
    call arguments.
    """

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        # do_convert_rgb collapses the 4 input channels down to RGB.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
| 68 | 0 |
'''simple docstring'''
def apply_table(inp: str, table: list) -> str:
    """Permute the bit-string `inp` using 1-indexed positions from `table`.

    Fix: the original signature declared two parameters with the same mangled
    name (a SyntaxError), and the script below calls this by `apply_table`.
    """
    res = ""
    for position in table:
        res += inp[position - 1]
    return res
def left_shift(data: str) -> str:
    """Rotate the string one position to the left (first char moves to the end).

    Fix: restored the name `left_shift` that the key-schedule script calls.
    """
    return data[1:] + data[0]
def xor(a: str, b: str) -> str:
    """Bitwise XOR of two equal-length bit-strings.

    Fix: the original signature reused one mangled parameter name for both
    arguments (a SyntaxError); the round function calls this as `xor`.
    """
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s: list, data: str) -> str:
    """Look up a 4-bit string in S-box `s`.

    The outer bits (first and last) select the row, the middle two bits select
    the column; the selected entry is returned as an unpadded binary string.
    Fix: restored the distinct parameter names and the `apply_sbox` name that
    the round function calls.
    """
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]
def function(expansion, sbox0, sbox1, key, message):
    """One Feistel round of simplified DES.

    Expands and XORs the right half with the round key, runs the halves through
    the S-boxes, permutes with `p4_table`, XORs into the left half and swaps.
    Fix: the original signature reused a single mangled parameter name five
    times (a SyntaxError), and every intermediate referenced mangled names.
    """
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(sbox0, temp[:4])  # noqa: E741
    r = apply_sbox(sbox1, temp[4:])
    # S-box outputs are unpadded binary strings; pad back to 2 bits.
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


# Permutation tables and S-boxes of simplified DES. Defined at module level
# because `function` reads `p4_table` as a global.
p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
p4_table = [2, 4, 3, 1]
IP = [2, 6, 3, 1, 4, 8, 5, 7]
IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
expansion = [4, 1, 2, 3, 2, 3, 4, 1]
s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

if __name__ == "__main__":
    # Fix: every script variable below was rebound to the same mangled name,
    # destroying the key schedule; the interactive demo is now also guarded so
    # importing this module no longer blocks on input().
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
| 705 |
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Return True if n's decimal digits are exactly the digits 1-9, each once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Largest 1-9 pandigital concatenated product (Project Euler 38).

    Candidates are base * 100002 (concatenating base with 2*base) searched
    downward, then base * 1002003 (base with 2*base and 3*base).
    Fix: the original returned an undefined name `candidate` and the two
    functions' mangled names broke the cross-references and the script below.
    """
    for base_num in range(9_9_9_9, 4_9_9_9, -1):
        candidate = 1_0_0_0_0_2 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(3_3_3, 9_9, -1):
        candidate = 1_0_0_2_0_0_3 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(F'''{solution() = }''')
| 68 | 0 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
# Fix: the module logger was bound to a mangled name while the trainer class
# below calls `logger.info(...)`.
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    """Trainer subclass adding calibration, QA post-processing and ONNX export.

    Fixes over the mangled original: the base class was the undefined name
    `__a` (the file imports `Trainer`), the keyword-only arguments all shared
    one mangled name (a SyntaxError), and every intermediate referenced the
    undefined name `a_`.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        """Build a shuffled DataLoader over the calibration dataset."""
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        """Run forward passes over ~`self.calib_num` samples to collect quantization stats."""
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(F"  Num examples = {self.calib_num}")
        logger.info(F"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Evaluate with QA post-processing; returns the (prefixed) metrics dict."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(F"{metric_key_prefix}_"):
                    metrics[F"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Predict with QA post-processing; returns a `PredictionOutput`."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(F"{metric_key_prefix}_"):
                metrics[F"{metric_key_prefix}_{key}"] = metrics.pop(key)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        """Export the (fake-quantized) model to `output_dir`/model.onnx."""
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(F"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
| 706 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Read a pyspark DataFrame into a `datasets` Dataset (or a streaming view).

    Fixes: the base class was the undefined name `_UpperCAmelCase` (the file
    imports `AbstractDatasetReader`), every parameter shared one mangled name
    (a SyntaxError), and the cache-file/format settings were dropped into
    locals instead of instance attributes.
    """

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset, or return a streaming dataset when streaming=True."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Force a re-download/re-preparation when the cache must be ignored.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
| 68 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Map each submodule to the public names it exports. Consumed by _LazyModule at
# the bottom so heavy framework code is only imported on first attribute access.
# (The original repeatedly reassigned a single mangled variable, losing every
# export list, and then referenced an undefined `_import_structure`.)
_import_structure = {
    'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
    'feature_extraction_whisper': ['WhisperFeatureExtractor'],
    'processing_whisper': ['WhisperProcessor'],
    'tokenization_whisper': ['WhisperTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_whisper_fast'] = ['WhisperTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_whisper'] = [
        'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'WhisperForConditionalGeneration',
        'WhisperModel',
        'WhisperPreTrainedModel',
        'WhisperForAudioClassification',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_whisper'] = [
        'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFWhisperForConditionalGeneration',
        'TFWhisperModel',
        'TFWhisperPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_whisper'] = [
        'FlaxWhisperForConditionalGeneration',
        'FlaxWhisperModel',
        'FlaxWhisperPreTrainedModel',
        'FlaxWhisperForAudioClassification',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 707 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
snake_case_ = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
snake_case_ = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = 
datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
snake_case_ = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    """Exact-match metric: percentage of predictions identical to their references.

    Fixes over the original block: the ``_compute`` signature reused a single
    parameter name (a SyntaxError); every normalization step assigned to a dead
    local instead of rebinding ``predictions``/``references``, so the
    ``regexes_to_ignore``/case/punctuation/number options had no effect; and
    both methods shared one name, so the :class:`datasets.Metric` hooks
    ``_info``/``_compute`` were never defined.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            # Strip ignored patterns before any of the options below are applied.
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        # Elementwise equality, then the mean match rate scaled to [0, 100].
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 68 | 0 |
def and_gate(input_1: int, input_2: int) -> int:
    """Logical AND on two bit inputs: 1 only when both inputs are non-zero.

    The original defined both functions under one mangled name with duplicate
    parameter names (a SyntaxError) and called the undefined names ``and_gate``
    and ``test_and_gate``; the canonical names are restored here.
    """
    # count(0) == 0 means neither input is 0, i.e. both are truthy.
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Exhaustively check the two-input AND truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 708 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level constants. The tokenizer class below reads `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`; the original assigned all of them
# to one mangled variable, so every later read was a NameError.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
    }
}

# Maximum sequence length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'AI-Sweden/gpt-sw3-126m': 2048,
    'AI-Sweden/gpt-sw3-350m': 2048,
    'AI-Sweden/gpt-sw3-1.6b': 2048,
    'AI-Sweden/gpt-sw3-6.7b': 2048,
    'AI-Sweden/gpt-sw3-20b': 2048,
}
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizer ):
    """SentencePiece-based tokenizer for the GPT-SW3 models.

    Fixes over the original block: duplicate ``lowercase__`` parameters were a
    SyntaxError; every attribute/local was assigned to one mangled name and
    never read; the base class name was unresolved; and all methods shared one
    name, so the :class:`PreTrainedTokenizer` hooks (``_tokenize``,
    ``save_vocabulary``, ...) were never actually overridden.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        # SentencePiece processors are not picklable; drop and rebuild in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        """Size of the underlying SentencePiece vocabulary."""
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Remove non-printing characters, normalize whitespace, apply NFC normalization."""
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        """Tokenize preprocessed text into SentencePiece string pieces."""
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Return the string unchanged; GPT-SW3 performs no clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens back into a single decoded string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        """Map every token string to its id, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model file into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk (e.g. loaded from serialized proto): serialize it out.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encode text directly with SentencePiece, skipping special-token bookkeeping."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decode ids directly with SentencePiece, skipping special-token handling."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Serialize a ``Conversation`` into the GPT-SW3 chat prompt format and encode it."""
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
| 68 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module-level setup. `main` below reads `logger`, and the second assignment
# originally read `MODEL_CONFIG_CLASSES` which was never bound (everything was
# assigned to one mangled variable); the canonical names are restored here.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')

require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')

MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)

# Backward-compatible alias: still-mangled code below may read `snake_case_`.
snake_case_ = MODEL_TYPES
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str ) -> "Image.Image":
    """Open the image at the given path and return it converted to RGB.

    Fix: the original body read undefined names (`_lowercase`, `im`) instead of
    the parameter and its local; the return annotation also wrongly said `str`.
    """
    with open(SCREAMING_SNAKE_CASE_, "rb") as f:
        im = Image.open(f)
        # Convert while the file handle is still open.
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    Fixes over the original block: every field was assigned to one mangled
    attribute without an annotation (so the dataclass had no fields), defaults
    referenced an undefined name, the validator was not named ``__post_init__``
    so it never ran, and ``main`` references this class as
    ``DataTrainingArguments`` which did not exist.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Require at least one data source: a hub dataset or a local folder.
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we fine-tune from.

    Fixes over the original block: every field was assigned to one mangled
    attribute without an annotation (so the dataclass had no fields), defaults
    referenced an undefined name, and ``main`` references this class as
    ``ModelArguments`` which did not exist.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        # NOTE(review): `snake_case_` is the module's (mangled) tuple of supported
        # model types at class-definition time — presumably MODEL_TYPES upstream.
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(snake_case_)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    """Stack a list of image-classification examples into a training batch.

    Fix: the original read the undefined names `examples`, `pixel_values` and
    `labels` (the parameter and locals were mangled away); the upstream names
    are restored so the function is callable.
    """
    # (N, C, H, W) batch of images plus a 1-D tensor of integer labels.
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def __lowerCamelCase ( ) -> Any:
    """Entry point: fine-tune/evaluate an image-classification model with HF Trainer.

    NOTE(review): this function is name-mangled — every assignment targets
    ``SCREAMING_SNAKE_CASE_`` while later statements read the original names
    (``parser``, ``model_args``, ``data_args``, ``training_args``, ``dataset``,
    ``split``, ``labels``, ``metric``, ``image_processor``, ``trainer``,
    ``train_result``, and the catch-all ``_lowercase``), so it raises
    NameError as written. The structure below mirrors the upstream
    run_image_classification example; restore the upstream bindings before use.
    """
    SCREAMING_SNAKE_CASE_ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        SCREAMING_SNAKE_CASE_ : int = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification" , _lowercase , _lowercase )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    SCREAMING_SNAKE_CASE_ : Optional[int] = training_args.get_process_log_level()
    logger.setLevel(_lowercase )
    transformers.utils.logging.set_verbosity(_lowercase )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
    logger.info(F"Training/evaluation parameters {training_args}" )
    # Detecting last checkpoint.
    SCREAMING_SNAKE_CASE_ : List[Any] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        SCREAMING_SNAKE_CASE_ : Optional[int] = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        SCREAMING_SNAKE_CASE_ : Tuple = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        SCREAMING_SNAKE_CASE_ : Tuple = {}
        if data_args.train_dir is not None:
            SCREAMING_SNAKE_CASE_ : int = os.path.join(data_args.train_dir , "**" )
        if data_args.validation_dir is not None:
            SCREAMING_SNAKE_CASE_ : Any = os.path.join(data_args.validation_dir , "**" )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset(
            "imagefolder" , data_files=_lowercase , cache_dir=model_args.cache_dir , task="image-classification" , )
    # If we don't have a validation split, split off a percentage of train as validation.
    SCREAMING_SNAKE_CASE_ : int = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , _lowercase ) and data_args.train_val_split > 0.0:
        SCREAMING_SNAKE_CASE_ : int = dataset["train"].train_test_split(data_args.train_val_split )
        SCREAMING_SNAKE_CASE_ : str = split["train"]
        SCREAMING_SNAKE_CASE_ : Dict = split["test"]
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    SCREAMING_SNAKE_CASE_ : List[str] = dataset["train"].features["labels"].names
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}, {}
    for i, label in enumerate(_lowercase ):
        SCREAMING_SNAKE_CASE_ : int = str(_lowercase )
        SCREAMING_SNAKE_CASE_ : List[Any] = label
    # Load the accuracy metric from the datasets package
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = evaluate.load("accuracy" )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(SCREAMING_SNAKE_CASE_ : Optional[int] ):
        # NOTE(review): reads `p` — the mangled-away name of its own parameter.
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
    SCREAMING_SNAKE_CASE_ : Any = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowercase ) , labelaid=_lowercase , idalabel=_lowercase , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    SCREAMING_SNAKE_CASE_ : List[str] = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    SCREAMING_SNAKE_CASE_ : List[Any] = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        SCREAMING_SNAKE_CASE_ : str = image_processor.size["shortest_edge"]
    else:
        SCREAMING_SNAKE_CASE_ : List[str] = (image_processor.size["height"], image_processor.size["width"])
    SCREAMING_SNAKE_CASE_ : str = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
    SCREAMING_SNAKE_CASE_ : int = Compose(
        [
            RandomResizedCrop(_lowercase ),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ] )
    SCREAMING_SNAKE_CASE_ : List[str] = Compose(
        [
            Resize(_lowercase ),
            CenterCrop(_lowercase ),
            ToTensor(),
            normalize,
        ] )
    def train_transforms(SCREAMING_SNAKE_CASE_ : List[str] ):
        # NOTE(review): reads `example_batch` / `_train_transforms` — mangled-away names.
        SCREAMING_SNAKE_CASE_ : List[Any] = [
            _train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
        ]
        return example_batch
    def val_transforms(SCREAMING_SNAKE_CASE_ : List[Any] ):
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset" )
        if data_args.max_train_samples is not None:
            SCREAMING_SNAKE_CASE_ : Optional[int] = (
                dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(_lowercase )
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset" )
        if data_args.max_eval_samples is not None:
            SCREAMING_SNAKE_CASE_ : Any = (
                dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(_lowercase )
    # Initialize our trainer
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = Trainer(
        model=_lowercase , args=_lowercase , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=_lowercase , tokenizer=_lowercase , data_collator=_lowercase , )
    # Training
    if training_args.do_train:
        SCREAMING_SNAKE_CASE_ : List[str] = None
        if training_args.resume_from_checkpoint is not None:
            SCREAMING_SNAKE_CASE_ : List[str] = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            SCREAMING_SNAKE_CASE_ : Dict = last_checkpoint
        SCREAMING_SNAKE_CASE_ : List[str] = trainer.train(resume_from_checkpoint=_lowercase )
        trainer.save_model()
        trainer.log_metrics("train" , train_result.metrics )
        trainer.save_metrics("train" , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        SCREAMING_SNAKE_CASE_ : Dict = trainer.evaluate()
        trainer.log_metrics("eval" , _lowercase )
        trainer.save_metrics("eval" , _lowercase )
    # Write model card and (optionally) push to hub
    SCREAMING_SNAKE_CASE_ : List[str] = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**_lowercase )
    else:
        trainer.create_model_card(**_lowercase )


if __name__ == "__main__":
    # NOTE(review): `main` is undefined — the entry point above is (mangled to)
    # `__lowerCamelCase`, so running this script raises NameError as written.
    main()
| 709 |
'''simple docstring'''
import re
from filelock import FileLock
# Detect whether nltk is installed. The guard below (and the sentence splitter
# after it) reads `NLTK_AVAILABLE`; the original assigned the flag to a mangled
# variable instead, making every read a NameError.
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # Serialize the punkt download across processes sharing this working dir.
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str ) -> str:
    """Split the input text into sentences, one per line (for rougeLsum scoring).

    Fix: the original discarded the `re.sub` result, so the pegasus "<n>"
    marker was never actually removed despite the comment's stated intent.
    """
    SCREAMING_SNAKE_CASE_ = re.sub("<n>" , "" , SCREAMING_SNAKE_CASE_ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE_ ) )
| 68 | 0 |
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    """Encode bytes as an uppercase Base16 (hex) string per RFC 3548 section 6.

    Fix: the original's comprehension read the undefined name
    ``UpperCAmelCase__`` instead of the parameter and the loop variable, and the
    function shared a mangled name with the decoder below (shadowing it).
    """
    # Each byte becomes two uppercase hex digits, zero-padded.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data: str) -> bytes:
    """Decode an uppercase Base16 (hex) string back into bytes.

    Raises:
        ValueError: if the input has an odd number of digits or contains
            characters outside the uppercase hex alphabet (RFC 3548 section 6).

    Fix: the original read the undefined name ``UpperCAmelCase__`` instead of
    its parameter and reused one mangled name for both functions.
    """
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 710 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    """Builds tiny LayoutLMv3 configs and dummy text/box/image inputs for the TF model tests.

    The class is named ``TFLayoutLMvaModelTester`` to match the reference in the
    test class's ``setUp`` below.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        """Create a config plus a full set of dummy inputs and labels."""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal: for every box, x1 <= x2 and y1 <= y2.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        """Check output shapes for text+image, text-only and image-only inputs."""
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        # QA heads are start/end span classifiers, hence 2 labels.
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by the common TF test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test suite for the TF LayoutLMv3 model family."""

    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Skip every pipeline test for this model for now.
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Copy ``inputs_dict`` and add the label tensors the given head expects."""
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            # Tile each tensor along a new "choices" axis.
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        """The computed loss must match the label shape for kwargs/dict/tuple inputs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        # -100 is the ignore index of the loss.
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the integration test below.

    Named ``prepare_img`` to match the call site in the integration test.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the microsoft/layoutlmv3-base checkpoint."""

    @cached_property
    def default_image_processor(self):
        # OCR is disabled because words/boxes are supplied explicitly below.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the last hidden state: 2 text tokens + 196 patches + 1 CLS = 199 positions
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 68 | 0 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    # pytest maps these argnames onto the test function's parameters, so the
    # function signature below must use exactly these names.
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    """_distribute_shards splits num_shards into at most max_num_jobs contiguous ranges."""
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """_split_gen_kwargs partitions shard lists across at most max_num_jobs jobs."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """_number_of_shards_in_gen_kwargs counts shards; mismatched list lengths raise."""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 711 |
'''simple docstring'''
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = [1]
for i in range(2 , SCREAMING_SNAKE_CASE_ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : Dict = list(range(SCREAMING_SNAKE_CASE_ ) )
# Find permutation
while factorials:
SCREAMING_SNAKE_CASE_ : Any = factorials.pop()
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = divmod(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 68 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
# Module logger.
# NOTE(review): the `: str` annotation is wrong (this is a logging.Logger), and the
# very next statement rebinds `snake_case_`, clobbering the logger — presumably these
# were originally distinct names (logger / pretrained-config archive map); confirm.
snake_case_ : str = logging.get_logger(__name__)
# Map of checkpoint name -> hosted config URL.
snake_case_ : str = {
    'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class LayoutLMvaConfig(PretrainedConfig):
    """Configuration for LayoutLMv3 models.

    Defaults mirror the ``microsoft/layoutlmv3-base`` checkpoint. Extends
    :class:`PretrainedConfig` (imported at the top of this file); the name
    ``LayoutLMvaConfig`` matches how this class is imported elsewhere in the
    codebase.
    """

    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_ad_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_ad_pos_bins=64,
        max_rel_ad_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        # Text-encoder hyperparameters are handled by the PretrainedConfig base.
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        # Layout ("2D"/spatial) position parameters — the `ad` spelling follows the
        # attribute names already used by the rest of this codebase.
        self.max_ad_position_embeddings = max_ad_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_ad_pos_bins = rel_ad_pos_bins
        self.max_rel_ad_pos = max_rel_ad_pos
        # Visual-branch parameters.
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMvaOnnxConfig(OnnxConfig):
    """ONNX export configuration for LayoutLMv3.

    The member names (``inputs``, ``atol_for_validation``, ``default_onnx_opset``,
    ``generate_dummy_inputs``) follow the :class:`OnnxConfig` contract so the
    exporter can discover them.
    """

    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # QA / sequence-classification heads consume the attention mask before bbox;
        # other tasks use the bbox-then-mask ordering.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor,
        batch_size=-1,
        seq_length=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=40,
        image_height=40,
    ):
        """Build dummy (text, boxes, image) inputs for tracing the export graph."""
        # OCR must be off: we pass words and boxes explicitly below.
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
| 712 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=5_1_2,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def parse_bool(string):
    """argparse ``type=`` helper: parse the literal strings 'True'/'False'.

    Named ``parse_bool`` to match its use as ``type=parse_bool`` below.

    :param string: the raw command-line value.
    :return: ``True`` or ``False``.
    :raises ValueError: for any other value, so argparse reports a clean error.
    """
    if string == "True":
        return True
    elif string == "False":
        return False
    else:
        raise ValueError(F"could not parse string as bool {string}" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
snake_case_ = parser.parse_args()
snake_case_ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 68 | 0 |
'''simple docstring'''
def dodecahedron_surface_area(edge) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length.

    Formula: 3 * sqrt(25 + 10*sqrt(5)) * edge^2.

    :param edge: positive edge length (int or float).
    :raises ValueError: if ``edge`` is not a positive number.
    """
    # Validate the type first so non-numeric input raises ValueError, not TypeError.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive." )
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge) -> float:
    """Return the volume of a regular dodecahedron with the given edge length.

    Formula: (15 + 7*sqrt(5)) / 4 * edge^3.

    :param edge: positive edge length (int or float).
    :raises ValueError: if ``edge`` is not a positive number.
    """
    # Validate the type first so non-numeric input raises ValueError, not TypeError.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive." )
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 713 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger.
# NOTE(review): the next statement rebinds `snake_case_`, clobbering the logger —
# presumably these were distinct names (logger / pretrained-config archive map); confirm.
snake_case_ = logging.get_logger(__name__)
# Map of checkpoint name -> hosted config URL.
snake_case_ = {
    'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
    'umberto-commoncrawl-cased-v1': (
        'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
    ),
    'umberto-wikipedia-uncased-v1': (
        'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
    ),
}
class CamembertConfig(PretrainedConfig):
    """Configuration for CamemBERT models (a RoBERTa-style encoder).

    Extends :class:`PretrainedConfig` (imported at the top of this file).
    """

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    """ONNX export configuration for CamemBERT.

    Exposes the ``inputs`` property required by the :class:`OnnxConfig` contract.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 68 | 0 |
'''simple docstring'''
def actual_power(a: int, b: int) -> int:
    """Return a ** abs(b) by recursive squaring (O(log|b|) multiplications).

    int(b / 2) truncates toward zero, so the recursion also terminates for
    negative exponents; the sign of b is handled in `power`.
    """
    if b == 0:
        return 1
    # Compute the half power once — recomputing it in both operands would
    # degrade the divide-and-conquer scheme from O(log|b|) back to O(|b|).
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Return a ** b, supporting negative exponents via 1 / a**|b|.

    Raises ZeroDivisionError when a == 0 and b < 0.
    """
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))  # -0.125
| 714 |
'''simple docstring'''
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort `numbers` in place in ascending order (exchange sort) and return it.

    Compares each position against every later position and swaps when the pair
    is out of order; O(n^2) comparisons. Empty and single-element lists are
    returned unchanged.
    """
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                # Swap the out-of-order pair inside the list itself.
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
| 68 | 0 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    """Holds the settings shared by the DPT image-processor tests and builds the
    kwargs dict used to construct a `DPTImageProcessor`."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # `size` defaults to an 18x18 target; a dict mirrors the processor's API.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Exercises `DPTImageProcessor` on PIL, NumPy and PyTorch inputs."""

    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        # An explicit `size` kwarg must override the serialized value.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 715 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()  # root logger; the stream handler below is attached to it
def _dump_articles(path: Path, articles: list) -> None:
    """Write `articles` to `path`, one article per line (no trailing newline).

    Uses `Path.write_text` so the file handle is opened and closed properly
    (the previous `open("w").writelines(...)` leaked the handle).
    """
    content = "\n".join(articles)
    Path(path).write_text(content)
# Tiny checkpoints used to keep the end-to-end tests fast.
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    """End-to-end tests for run_eval.py / run_eval_search.py on tiny checkpoints."""

    def run_eval_tester(self, model):
        """Run `run_generate()` on a one-article input file and check the output file appears."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    # test one model quickly (not @slow) to catch simple problems and a broken code path
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    # any extra models should go into the list here for slow testing
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # 2 models to validate: 1. translation (t5), 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            # The search must print its results table but not "Info" noise.
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 68 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """
    Output of the Karras VE scheduler's step functions.

    Attributes:
        prev_sample: computed sample (x_{t-1}) for the previous timestep.
        derivative: derivative of the predicted original sample.
        pred_original_sample: predicted denoised sample (x_0) based on the
            current model output, or None when not computed.
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """
    Stochastic sampling scheduler in the style of Karras et al. (2022),
    "Elucidating the Design Space of Diffusion-Based Generative Models"
    (Algorithm 2, VE column of Table 1).
    """

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        """Store the noise-range and churn hyper-parameters (via @register_to_config)."""
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No-op: this scheduler does not rescale the model input."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Set the discrete timesteps (descending) and the sigma(t_i) schedule."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        # Geometric interpolation between sigma_max and sigma_min.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None):
        """Explicit Langevin-like "churn": add noise to move the sample to a higher
        noise level sigma_hat; returns (sample_hat, sigma_hat)."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """Euler step from sigma_hat to sigma_prev (first-order prediction)."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """Second-order (Heun) correction using the derivative at the predicted point."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        # Training-time noising is not defined for this sampler.
        raise NotImplementedError()
| 716 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[np.float64],
    constant_matrix: NDArray[np.float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve A·x = b iteratively with the Jacobi method.

    Args:
        coefficient_matrix: strictly diagonally dominant n x n matrix A.
        constant_matrix: n x 1 column vector b.
        init_val: initial guess for the n unknowns.
        iterations: number of Jacobi sweeps to run (must be >= 1).

    Returns:
        The approximate solution after `iterations` sweeps, as Python floats.

    Raises:
        ValueError: on inconsistent dimensions, a non-positive iteration count,
            or a coefficient matrix that is not strictly diagonally dominant.
    """
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    # Augmented matrix [A | b]: the last column carries the constant terms.
    table: NDArray[np.float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for the given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]  # diagonal coefficient a_ii
                elif col == cols - 1:
                    val = table[row][col]  # constant term b_i
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[np.float64]) -> bool:
    """Return True if every diagonal entry of the augmented matrix exceeds the sum
    of the other coefficients in its row (the constants column is excluded);
    otherwise raise ValueError."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 68 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    """Agent tool that answers a natural-language question about an image using a
    ViLT visual-question-answering checkpoint."""

    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # Requires PIL/vision extras before the base class loads any weights.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image.Image", question: str):
        """Tokenize the (image, question) pair for the VQA model."""
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        """Run the model without gradient tracking and return the raw logits."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the highest-scoring logit index to its answer label string."""
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 717 |
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 when both inputs are equal (logical XNOR), else 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """Exhaustive truth-table check for xnor_gate."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 68 | 0 |
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
snake_case_ = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for
    training and evaluation on a GLUE task.
    """

    # Which GLUE task to run; matched (lower-cased) against `glue_processors`.
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        # Normalize so lookups into the processor registries are case-insensitive.
        self.task_name = self.task_name.lower()
class Split(Enum):
    """Dataset split names accepted by `GlueDataset`."""

    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    """PyTorch dataset of GLUE `InputFeatures`, cached on disk per
    (split, tokenizer class, max length, task)."""

    args: "GlueDataTrainingArguments"
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: "GlueDataTrainingArguments",
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        """Build (or load from cache) the features for one split of a GLUE task.

        Args:
            args: data arguments (task name, data dir, max length, cache policy).
            tokenizer: tokenizer used to convert examples to features.
            limit_length: optionally keep only the first N examples.
            mode: split to load — a `Split` member or its string name.
            cache_dir: where to read/write the features cache (defaults to data_dir).
        """
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        """Return the (possibly reordered) label list for the task."""
        return self.label_list
| 718 |
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Return the Hubble parameter H(z) of an FLRW universe.

    H(z) = H0 * sqrt(Ω_rad (1+z)^4 + Ω_m (1+z)^3 + Ω_k (1+z)^2 + Ω_Λ),
    where the curvature density Ω_k is inferred from the requirement that all
    densities sum to one.

    Raises:
        ValueError: if any parameter is negative, or any relative density
            exceeds one.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")

    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    return hubble_constant * e_2 ** (1 / 2)


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 68 | 0 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

# Metrics that can only run when their heavyweight optional dependency is present.
REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    """Decorator: skip `test_case` when the metric needs fairseq and it is absent."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_if_metric_requires_transformers(test_case):
    """Decorator: skip `test_case` when the metric needs transformers and it is absent."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_on_windows(test_case):
    """Decorator: skip `test_case` for metrics that cannot run on Windows."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    """List the metric names found under ./metrics/ as parameterized-test cases."""
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    """Run each local metric's doctests with its expensive model calls patched out."""

    INTENSIVE_CALLS_PATCHER = {}  # metric name -> context-manager factory (see register below)
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        """Apply the registered patcher for `metric_name`, if any."""
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        """Redirect `datasets.load_metric` to the local ./metrics checkouts."""

        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        """Decorator factory: register a generator function as the intensive-calls
        patcher (wrapped in `contextmanager`) for `metric_name`."""

        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    """Replace the bleurt TF predictor with a cheap mock returning fixed scores."""
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    """Stub out bert_score's model download and its scoring forward pass."""
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    """Avoid downloading the comet model; return canned prediction scores."""

    def load_from_checkpoint(checkpoint_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model which is supposed to download a comet model
    # mock load_from_checkpoint which is supposed to load that model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def __lowerCamelCase ( ) -> Optional[int]:
    """Check that the seqeval metric rejects an unknown tagging ``scheme``.

    Previously the function referenced the undefined name ``lowercase__`` for
    the exception type, match pattern, and scheme argument, so it raised
    ``NameError`` instead of exercising the metric.
    """
    metric = load_metric(os.path.join("metrics" , "seqeval" ) )
    wrong_scheme = "ERROR"
    error_message = F"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError , match=re.escape(error_message ) ):
        metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
| 719 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Unit tests for ``transformers.generation.DisjunctiveConstraint``.

    Fixes vs. the previous revision: the annotated tuple assignments
    (``a, b, c : Dict = ...``) were SyntaxErrors, ``lowercase__`` was an
    undefined placeholder for the exception type / constraint list, and all
    four methods shared one name so unittest would only run the last.
    """

    def test_input_types(self):
        # A constraint is a list of candidate token-id sequences.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        # Tensors (rather than nested lists of ints) must be rejected.
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One sequence being a prefix of another is ambiguous, hence rejected.
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(stepped is True and completed is False and reset is False)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(stepped is True and completed is False and reset is False)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        self.assertTrue(stepped is True and completed is True and reset is False)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        # After a reset, progression starts over and remaining() counts down.
        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 68 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Map of submodule name -> public names it provides; consumed by _LazyModule.
# Previously this dict and the model list below were both assigned to the same
# throwaway name, and the name ``_import_structure`` used at the bottom of the
# file was never defined.
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is absent: the modeling objects simply are not exported.
    pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    # Previously the proxy was assigned to a throwaway name and discarded.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Make RNG-dependent ops deterministic so model outputs are reproducible in tests.
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """Model tests for ``VQModel``.

    Fixes vs. the previous revision: both base classes were the undefined name
    ``_UpperCAmelCase`` (now the imported mixins), the two class attributes
    were both named ``_A`` (the second clobbered the first) and the properties
    and test methods all shared one name, shadowing each other.
    """

    model_class = VQModel  # read by ModelTesterMixin
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        # A small random image batch on the test device.
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        # Minimal VQModel config so tests stay fast.
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        # Not applicable to VQModel.
        pass

    def test_training(self):
        # Not applicable to VQModel.
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        # Seed all RNGs so the forward pass is reproducible.
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 68 | 0 |
'''simple docstring'''
def xnor_gate(input_a: int, input_b: int) -> int:
    """Return 1 when both inputs are equal (logical XNOR), else 0.

    Fixes vs. the previous revision: both parameters were named
    ``SCREAMING_SNAKE_CASE_`` (duplicate argument — a SyntaxError), the return
    annotation referenced the never-imported ``List``, and the rest of this
    module calls ``xnor_gate``, which was never defined.

    Args:
        input_a: first gate input (0 or 1).
        input_b: second gate input (0 or 1).

    Returns:
        1 if the inputs are equal, 0 otherwise.
    """
    return 1 if input_a == input_b else 0


# Backward-compatible alias preserving the previous (obfuscated) public name.
__lowerCamelCase = xnor_gate
def __lowerCamelCase ( ) -> None:
    """Sanity-check ``xnor_gate`` against the full XNOR truth table.

    The return annotation was ``Dict``, which is never imported in this file,
    so evaluating the annotation at definition time raised ``NameError``.

    Raises:
        AssertionError: if any truth-table row disagrees.
    """
    assert xnor_gate(0 , 0 ) == 1
    assert xnor_gate(0 , 1 ) == 0
    assert xnor_gate(1 , 0 ) == 0
    assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
    # Print the XNOR truth table.
    # NOTE(review): as written this file never defines ``xnor_gate`` (the gate
    # function above is named ``__lowerCamelCase``), so running the script
    # raises ``NameError`` — TODO confirm the intended function name.
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 721 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Tests for the ``transformers.utils.logging`` verbosity helpers.

    Fixes vs. the previous revision: all five methods shared one name so only
    the last would run, and ``lowercase__`` was an undefined placeholder for
    the saved verbosity / logger / message variables (a ``NameError`` at
    runtime).
    """

    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset so the env var takes effect on the next logger call
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            F"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def __lowerCamelCase ( ) -> Optional[int]:
    """Verify the global HF Hub progress-bar toggle round-trips correctly."""
    # Each (toggle, expected) pair: flip the switch, then confirm the state.
    for toggle, should_be_disabled in ((disable_progress_bar, True), (enable_progress_bar, False)):
        toggle()
        assert are_progress_bars_disabled() == should_be_disabled
| 68 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)

# Checkpoint name -> config URL for the published MobileNetV2 variants.
# NOTE(review): this dict reuses the name ``snake_case_`` and therefore
# clobbers the logger bound on the previous line — presumably these were two
# distinct names (logger / pretrained-config map) before obfuscation. Confirm.
snake_case_ = {
    'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
    'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
    'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
    'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    """Configuration class for MobileNetV2 models.

    Fixes vs. the previous revision: the base class was the undefined name
    ``__a`` (now the imported ``PretrainedConfig``), every ``__init__``
    parameter was named ``lowercase__`` (duplicate arguments — a SyntaxError)
    while the body read the real names, ``super().__init__`` received the
    undefined ``**A__``, and the ``model_type`` class attribute was named
    ``_A`` (``PretrainedConfig`` keys off ``model_type``).
    """

    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        """Build the config; unknown kwargs are forwarded to PretrainedConfig."""
        super().__init__(**kwargs )

        # Channel widths scale with depth_multiplier; zero or negative is nonsense.
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( OnnxConfig ):
    """ONNX export configuration for MobileNetV2.

    Fixes vs. the previous revision: the base class was the undefined name
    ``__a`` (now the imported ``OnnxConfig``), and the three properties all
    shared one name, shadowing each other; they are restored to the names the
    ONNX export machinery reads (``inputs``/``outputs``/``atol_for_validation``).
    """

    # Minimum torch version supported by this export (was the shadowed ``_A``).
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs(self):
        # Single image input with a dynamic batch axis.
        return OrderedDict([("pixel_values", {0: "batch"})] )

    @property
    def outputs(self):
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )

    @property
    def atol_for_validation(self):
        # Absolute tolerance when validating exported vs. eager outputs.
        return 1e-4
| 700 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    """Build a tiny model/optimizer/scheduler and train/valid dataloaders for tests.

    Previously every assignment went to the throwaway name
    ``SCREAMING_SNAKE_CASE_`` while the next line read ``model``/``optimizer``
    (undefined — a ``NameError``), and the function itself was misnamed even
    though the test class below calls ``create_components()``.

    Returns:
        (model, optimizer, scheduler, train_dl, valid_dl)
    """
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


# Backward-compatible alias preserving the previous (obfuscated) public name.
__lowerCamelCase = create_components
def get_signature(model):
    """Scalar fingerprint of a linear model: sum of |weights| plus |biases|.

    Previously the parameter was named ``SCREAMING_SNAKE_CASE_`` while the
    body read the undefined ``model``, and the function was misnamed even
    though the test class calls ``get_signature``.
    """
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


# Backward-compatible alias preserving the previous (obfuscated) public name.
__lowerCamelCase = get_signature
def load_random_weights(model):
    """Overwrite ``model``'s parameters with a freshly initialized Linear of the same shape.

    Previously the parameter name was rebound to the state dict before
    ``model.load_state_dict`` ran, so ``model`` was undefined (``NameError``).
    """
    # weight is (out_features, in_features); Linear takes (in_features, out_features),
    # hence the transpose of the weight shape.
    random_state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(random_state)


# Backward-compatible alias preserving the previous (obfuscated) public name.
__lowerCamelCase = load_random_weights
class SCREAMING_SNAKE_CASE__ ( AccelerateTestCase ):
    """Tests for core ``Accelerator`` behaviour.

    Fixes vs. the previous revision: the base class was the undefined name
    ``_UpperCAmelCase`` (now the imported ``AccelerateTestCase``), every method
    shared one name so only the last survived, the annotated tuple assignments
    were SyntaxErrors, and placeholder assignments/arguments are restored to
    the variables the surrounding code actually reads (including the garbled
    kwargs ``load_in_abit`` / ``llm_inta_enable_fpaa_cpu_offload``).
    """

    @require_cuda
    def test_accelerator_can_only_be_created_on_one_device_type(self):
        # A first accelerator picks up CUDA as the device...
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        # ...after which asking for a CPU accelerator must be rejected.
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        # GradientState is shared state: mutating via the accelerator is visible here.
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")

    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerator_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        dummy_obj, model, optimizer, scheduler, train_dl, valid_dl = accelerator.prepare(
            dummy_obj, model, optimizer, scheduler, train_dl, valid_dl
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )

    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """An 8-bit quantized model placed on one GPU can be prepared."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,  # was the garbled kwarg ``load_in_abit=lowercase__``
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """8-bit models with CPU-offloaded modules cannot be prepared."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            device_map=device_map,
            load_in_8bit=True,
            llm_int8_enable_fp32_cpu_offload=True,  # was garbled ``llm_inta_enable_fpaa_cpu_offload``
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """8-bit models split across devices cannot be prepared under MULTI_GPU."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)
        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_device_no_distributed(self):
        """Without a distributed setup, a device-mapped 8-bit model prepares fine."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(model)
| 68 | 0 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
# Checkpoint name -> config URL for the published MaskFormer variants.
snake_case_ = {
    'facebook/maskformer-swin-base-ade': (
        'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
# NOTE(review): rebinding ``snake_case_`` here clobbers the URL map above —
# presumably these were two distinct names (config map / logger) before
# obfuscation. The rest of this module uses ``snake_case_`` as the logger.
snake_case_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    """Configuration class for MaskFormer models.

    Fixes vs. the previous revision: the base class was the undefined name
    ``_UpperCAmelCase`` (now the imported ``PretrainedConfig``); the four class
    attributes were all named ``_A`` (each clobbering the last) even though the
    body reads ``self.backbones_supported`` / ``self.decoders_supported``;
    placeholder assignments discarded the converted configs; and the alternate
    constructor / ``to_dict`` override had lost their names.
    """

    model_type = 'maskformer'
    attribute_map = {'hidden_size': 'mask_feature_size'}
    backbones_supported = ['resnet', 'swin']
    decoders_supported = ['detr']

    def __init__( self , fpn_feature_size = 256 , mask_feature_size = 256 , no_object_weight = 0.1 , use_auxiliary_loss = False , backbone_config = None , decoder_config = None , init_std = 0.02 , init_xavier_std = 1.0 , dice_weight = 1.0 , cross_entropy_weight = 1.0 , mask_weight = 20.0 , output_auxiliary_logits = None , **kwargs , ):
        """Build the config; ``backbone_config``/``decoder_config`` may be dicts or config objects."""
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )

        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )

        # verify that the backbone is supported
        # (``snake_case_`` is this module's logger binding)
        if backbone_config.model_type not in self.backbones_supported:
            snake_case_.warning_once(
                F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                F"Supported model types: {','.join(self.backbones_supported )}" )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type" ) if isinstance(decoder_config , dict ) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    F"Transformer Decoder {decoder_type} not supported, please use one of"
                    F" {','.join(self.decoders_supported )}" )
            if isinstance(decoder_config , dict ):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config )

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        # mirror the decoder's attention geometry for downstream consumers
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs )

    @classmethod
    def from_backbone_and_decoder_configs( cls , backbone_config , decoder_config , **kwargs ):
        """Alternate constructor from pre-built backbone and decoder configs."""
        return cls(
            backbone_config=backbone_config , decoder_config=decoder_config , **kwargs , )

    def to_dict( self ):
        """Serialize, expanding the nested backbone/decoder configs to dicts."""
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 701 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
snake_case_ = logging.get_logger(__name__)
# Pretrained X-MOD checkpoint names -> config URLs.
# NOTE(review): both assignments target `snake_case_` (the obfuscation
# collapsed the original distinct names), so the map below overwrites the
# logger binding.
snake_case_ = {
    'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
    'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
    'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
    'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
    'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
    'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
    'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
    'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
    'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Configuration class for an X-MOD model (model type ``"xmod"``).

    Bug fix: the obfuscated ``__init__`` declared every parameter as
    ``lowercase__`` (duplicate parameter names are a SyntaxError). Distinct
    names are restored from the right-hand sides of the body assignments,
    which still used the intended identifiers.
    """

    # NOTE(review): `_A` is the obfuscated name of the `model_type` attribute.
    _A = "xmod"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        """Store X-MOD hyperparameters; special token ids go to the base class."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # X-MOD adapter settings
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        # list() copies the (tuple) default so instances own their language list
        self.languages = list(languages)
        self.default_language = default_language
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def __lowerCamelCase ( self ):
        """Return the input-name -> dynamic-axes mapping for the current task.

        Bug fix: the original assigned the axes dict to an obfuscated throwaway
        name but built the OrderedDict from the undefined ``dynamic_axis``
        (NameError at runtime). The dict is now bound to the name actually used.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 68 | 0 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 702 |
'''simple docstring'''
def or_gate(input_a: int, input_b: int) -> int:
    """Logical OR gate: return 1 if at least one input is 1, else 0.

    Renamed from the obfuscated ``__lowerCamelCase``: every call site in this
    file (the self-test and the __main__ block below) invokes ``or_gate``.
    The obfuscated signature also declared both parameters with the same name
    (a SyntaxError) while the body read names absent from the signature.
    """
    # count(1) over the pair is non-zero iff either input equals 1
    return int((input_a, input_b).count(1) != 0)
def test_or_gate() -> None:
    """Exhaustively verify the OR truth table.

    Bug fix: the obfuscated name ``__lowerCamelCase`` collided with the gate
    function defined just above (the second ``def`` silently replaced the
    first); the self-test gets its own name.
    """
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
    # Demo: print the OR gate output for each input combination, in the
    # original order (0,1), (1,0), (0,0), (1,1).
    for _pair in ((0, 1), (1, 0), (0, 0), (1, 1)):
        print(or_gate(*_pair))
| 68 | 0 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader ( yaml.SafeLoader ):
    """A SafeLoader that rejects YAML mappings containing duplicate keys.

    Bug fixes vs the obfuscated original: the class is renamed to
    ``_NoDuplicateSafeLoader`` (the name ``yaml.load`` uses further down this
    file); the two methods, both flattened to ``__lowerCamelCase``, recover
    the names their own call sites require (``construct_mapping`` calls
    ``super().construct_mapping`` and ``self._check_no_duplicates_on_constructed_node``);
    and the duplicate ``lowercase__`` parameters (a SyntaxError) are restored.
    """

    def _check_no_duplicates_on_constructed_node(self, node):
        """Raise TypeError if the mapping node holds the same key twice."""
        # Resolve each key node to the Python object it was constructed into.
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable; use their tuple form so Counter can count them.
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        """Construct the mapping, then verify it had no duplicate keys."""
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content):
    """Split a README into ``(yaml_block, body)``.

    Returns ``(None, full_text)`` when there is no leading ``---`` fenced YAML
    header. Bug fixes vs the obfuscated original: renamed to
    ``_split_yaml_from_readme`` (the name both call sites in this file use),
    and the fallback return no longer joins the undefined ``lowerCAmelCase_``.
    """
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        # index of the closing '---' (offset by 1: we searched from line 1)
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata ( a__ ):
    """Dict-like holder for the YAML metadata block of a dataset README.

    Bug fixes vs the obfuscated original: the class is renamed
    ``DatasetMetadata`` (the __main__ block below calls
    ``DatasetMetadata.from_readme`` / ``.to_readme``); method and attribute
    names are restored from their in-file call sites
    (``cls.from_yaml_string``, ``self._to_readme``, ``self.to_yaml_string``,
    ``cls._FIELDS_WITH_DASHES``); duplicate parameter names are removed.
    NOTE(review): the base class ``a__`` is an obfuscation artifact (undefined
    here); the methods rely on ``self.items()`` and ``cls(**mapping)``, which
    suggests a ``dict`` subclass — confirm before relying on it.
    """

    # class attributes
    _FIELDS_WITH_DASHES = {"""train_eval_index"""}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path):
        """Parse the YAML block of the README at ``path`` (empty metadata if absent)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path):
        """Write this metadata back into the README at ``path`` (creating it if needed)."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content=None):
        """Return README text with the YAML block replaced (or prepended)."""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string):
        """Build metadata from a YAML string, normalizing dashed keys to underscores."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self):
        """Serialize to YAML, converting underscored fields back to dashed keys."""
        # NOTE(review): the obfuscation erased the flag values passed to
        # safe_dump; sort_keys=False / allow_unicode=True match upstream
        # datasets — confirm.
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
# Recognized task-category ids mapped to lists of sub-task ids.
# NOTE(review): every list is empty here; presumably sub-tasks are populated
# elsewhere — confirm against the validation logic that consumes this map.
snake_case_ = {
    'image-classification': [],
    'translation': [],
    'image-segmentation': [],
    'fill-mask': [],
    'automatic-speech-recognition': [],
    'token-classification': [],
    'sentence-similarity': [],
    'audio-classification': [],
    'question-answering': [],
    'summarization': [],
    'zero-shot-classification': [],
    'table-to-text': [],
    'feature-extraction': [],
    'other': [],
    'multiple-choice': [],
    'text-classification': [],
    'text-to-image': [],
    'text2text-generation': [],
    'zero-shot-image-classification': [],
    'tabular-classification': [],
    'tabular-regression': [],
    'image-to-image': [],
    'tabular-to-text': [],
    'unconditional-image-generation': [],
    'text-retrieval': [],
    'text-to-speech': [],
    'object-detection': [],
    'audio-to-audio': [],
    'text-generation': [],
    'conversational': [],
    'table-question-answering': [],
    'visual-question-answering': [],
    'image-to-text': [],
    'reinforcement-learning': [],
    'voice-activity-detection': [],
    'time-series-forecasting': [],
    'document-question-answering': [],
}
if __name__ == "__main__":
    # CLI entry point: validate (and round-trip) the YAML metadata block of a
    # README.md file.
    # Bug fix: the obfuscation collapsed the distinct variable names into
    # `snake_case_`, leaving `ap`, `args`, `readme_filepath` and
    # `dataset_metadata` undefined at their use sites; restored from those uses.
    from argparse import ArgumentParser

    ap = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
    ap.add_argument('readme_filepath')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 703 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
# Module-level logger.
snake_case_ = logging.get_logger(__name__)
# Pretrained DPT checkpoint names -> config URLs.
# NOTE(review): both assignments target `snake_case_` (obfuscation collapsed
# the original distinct names), so the map overwrites the logger binding.
snake_case_ = {
    'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Configuration for a DPT (Dense Prediction Transformer) model.

    NOTE(review): obfuscation artifacts remain throughout this class — every
    ``__init__`` parameter shares the name ``lowercase__`` (duplicate
    parameter names are a SyntaxError) and every assignment target was
    collapsed to ``SCREAMING_SNAKE_CASE_``, so the right-hand-side names
    (hidden_size, is_hybrid, backbone_config, ...) indicate the intended
    parameters and attributes.
    """

    _A = "dpt"

    def __init__( self , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3072 , lowercase__="gelu" , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.02 , lowercase__=1e-12 , lowercase__=384 , lowercase__=16 , lowercase__=3 , lowercase__=False , lowercase__=True , lowercase__=[2, 5, 8, 11] , lowercase__="project" , lowercase__=[4, 2, 1, 0.5] , lowercase__=[96, 192, 384, 768] , lowercase__=256 , lowercase__=-1 , lowercase__=False , lowercase__=True , lowercase__=0.4 , lowercase__=255 , lowercase__=0.1 , lowercase__=[1, 1024, 24, 24] , lowercase__=[0, 1] , lowercase__=None , **lowercase__ , ):
        """Build the config; in hybrid mode resolve the BiT backbone config.

        NOTE(review): the mutable list defaults here are shared between calls
        (classic mutable-default pitfall) — confirm intent before reuse.
        """
        super().__init__(**lowercase__ )
        SCREAMING_SNAKE_CASE_ : Any = hidden_size
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = is_hybrid
        # Hybrid mode pairs the ViT encoder with a BiT convolutional backbone.
        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone." )
                SCREAMING_SNAKE_CASE_ : Tuple = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                SCREAMING_SNAKE_CASE_ : Union[str, Any] = BitConfig(**lowercase__ )
            elif isinstance(lowercase__ , lowercase__ ):
                # presumably: backbone_config given as a plain dict
                logger.info("Initializing the config with a `BiT` backbone." )
                SCREAMING_SNAKE_CASE_ : Dict = BitConfig(**lowercase__ )
            elif isinstance(lowercase__ , lowercase__ ):
                # presumably: backbone_config already a PretrainedConfig
                SCREAMING_SNAKE_CASE_ : Optional[int] = backbone_config
            else:
                raise ValueError(
                    F"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}." )
            SCREAMING_SNAKE_CASE_ : List[Any] = backbone_featmap_shape
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." )
        else:
            # Non-hybrid: no backbone config/featmap; nothing ignored in the neck.
            SCREAMING_SNAKE_CASE_ : Optional[Any] = None
            SCREAMING_SNAKE_CASE_ : int = None
            SCREAMING_SNAKE_CASE_ : List[Any] = []
        SCREAMING_SNAKE_CASE_ : int = num_hidden_layers
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_attention_heads
        SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size
        SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_act
        SCREAMING_SNAKE_CASE_ : Dict = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_ : str = initializer_range
        SCREAMING_SNAKE_CASE_ : List[Any] = layer_norm_eps
        SCREAMING_SNAKE_CASE_ : Any = image_size
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = patch_size
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_channels
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = qkv_bias
        SCREAMING_SNAKE_CASE_ : Optional[Any] = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" )
        SCREAMING_SNAKE_CASE_ : Any = readout_type
        SCREAMING_SNAKE_CASE_ : Optional[Any] = reassemble_factors
        SCREAMING_SNAKE_CASE_ : str = neck_hidden_sizes
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = fusion_hidden_size
        SCREAMING_SNAKE_CASE_ : Any = head_in_index
        SCREAMING_SNAKE_CASE_ : str = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        SCREAMING_SNAKE_CASE_ : List[Any] = use_auxiliary_head
        SCREAMING_SNAKE_CASE_ : int = auxiliary_loss_weight
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = semantic_loss_ignore_index
        SCREAMING_SNAKE_CASE_ : Any = semantic_classifier_dropout

    def __lowerCamelCase ( self ):
        """Serialize to a dict, expanding the nested backbone config when present.

        NOTE(review): the keyed writes into ``output`` were collapsed by the
        obfuscation, so ``output`` is undefined here as written.
        """
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            SCREAMING_SNAKE_CASE_ : List[str] = self.backbone_config.to_dict()
        SCREAMING_SNAKE_CASE_ : Optional[int] = self.__class__.model_type
        return output
| 68 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Processor bundling a Chinese-CLIP image processor with a BERT tokenizer.

    NOTE(review): obfuscation artifacts remain — the three `_A` class
    attributes below were originally distinct names (attributes,
    image_processor_class, tokenizer_class) and each assignment now
    overwrites the previous one; several signatures repeat the parameter
    name ``lowercase__`` (a SyntaxError), and bodies reference the original
    parameter names (image_processor, tokenizer, text, images, ...).
    """

    _A = ["""image_processor""", """tokenizer"""]
    _A = """ChineseCLIPImageProcessor"""
    _A = ("""BertTokenizer""", """BertTokenizerFast""")

    def __init__( self , lowercase__=None , lowercase__=None , **lowercase__ ):
        """Validate and store the image processor and tokenizer."""
        SCREAMING_SNAKE_CASE_ : Any = None
        if "feature_extractor" in kwargs:
            # Back-compat: accept the deprecated `feature_extractor` kwarg.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , lowercase__ , )
            SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop("feature_extractor" )
        SCREAMING_SNAKE_CASE_ : str = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(lowercase__ , lowercase__ )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processor

    def __call__( self , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ ):
        """Tokenize `text` and/or preprocess `images`; merge pixel values into the encoding."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            SCREAMING_SNAKE_CASE_ : Any = self.tokenizer(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
        if images is not None:
            SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
        if text is not None and images is not None:
            # attach the image features to the text encoding before returning
            SCREAMING_SNAKE_CASE_ : List[Any] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ )

    def __lowerCamelCase ( self , *lowercase__ , **lowercase__ ):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )

    def __lowerCamelCase ( self , *lowercase__ , **lowercase__ ):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*lowercase__ , **lowercase__ )

    @property
    def __lowerCamelCase ( self ):
        """Combined, de-duplicated model input names of tokenizer and image processor."""
        SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer.model_input_names
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def __lowerCamelCase ( self ):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase__ , )
        return self.image_processor_class
| 704 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Fixture helper: builds image-processor kwargs and random test images.

    NOTE(review): obfuscation artifacts remain — signatures repeat the
    parameter name ``lowercase__`` (a SyntaxError) and assignment targets are
    collapsed, while bodies reference the original names (parent, batch_size,
    num_channels, ...). All methods share the name ``__lowerCamelCase``, so
    later definitions shadow earlier ones.
    """

    def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=18 , lowercase__=30 , lowercase__=400 , lowercase__=True , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=True , lowercase__=[0.48145466, 0.4578275, 0.40821073] , lowercase__=[0.26862954, 0.26130258, 0.27577711] , lowercase__=True , ):
        """Store the fixture parameters (CLIP-style mean/std defaults)."""
        SCREAMING_SNAKE_CASE_ : Optional[Any] = size if size is not None else {"height": 224, "width": 224}
        SCREAMING_SNAKE_CASE_ : Optional[Any] = crop_size if crop_size is not None else {"height": 18, "width": 18}
        SCREAMING_SNAKE_CASE_ : str = parent
        SCREAMING_SNAKE_CASE_ : List[Any] = batch_size
        SCREAMING_SNAKE_CASE_ : Dict = num_channels
        SCREAMING_SNAKE_CASE_ : Any = image_size
        SCREAMING_SNAKE_CASE_ : Tuple = min_resolution
        SCREAMING_SNAKE_CASE_ : Optional[Any] = max_resolution
        SCREAMING_SNAKE_CASE_ : Tuple = do_resize
        SCREAMING_SNAKE_CASE_ : List[str] = size
        SCREAMING_SNAKE_CASE_ : str = do_center_crop
        SCREAMING_SNAKE_CASE_ : List[str] = crop_size
        SCREAMING_SNAKE_CASE_ : int = do_normalize
        SCREAMING_SNAKE_CASE_ : Optional[int] = image_mean
        SCREAMING_SNAKE_CASE_ : Dict = image_std
        SCREAMING_SNAKE_CASE_ : List[Any] = do_convert_rgb

    def __lowerCamelCase ( self ):
        """Return the kwargs dict used to build the processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def __lowerCamelCase ( self , lowercase__=False , lowercase__=False , lowercase__=False ):
        """Create random test images as PIL (default), numpy, or torch tensors."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            SCREAMING_SNAKE_CASE_ : Optional[int] = []
            for i in range(self.batch_size ):
                image_inputs.append(
                    np.random.randint(
                        255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
        else:
            # varying resolutions drawn from [min_resolution, max_resolution)
            SCREAMING_SNAKE_CASE_ : str = []
            for i in range(self.batch_size ):
                SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[Any] = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
                image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            SCREAMING_SNAKE_CASE_ : str = [Image.fromarray(np.moveaxis(lowercase__ , 0 , -1 ) ) for x in image_inputs]
        if torchify:
            SCREAMING_SNAKE_CASE_ : List[str] = [torch.from_numpy(lowercase__ ) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase,unittest.TestCase ):
    """Tests for ChineseCLIPImageProcessor on 3-channel PIL/numpy/torch inputs.

    NOTE(review): obfuscation artifacts remain — all methods share the name
    ``__lowerCamelCase`` (later defs shadow earlier ones) and bodies reference
    the undefined placeholder ``lowercase__``.
    """

    _A = ChineseCLIPImageProcessor if is_vision_available() else None

    def __lowerCamelCase ( self ):
        """Create the shared fixture helper with center-crop enabled."""
        SCREAMING_SNAKE_CASE_ : List[Any] = ChineseCLIPImageProcessingTester(self , do_center_crop=lowercase__ )

    @property
    def __lowerCamelCase ( self ):
        """Kwargs for building the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def __lowerCamelCase ( self ):
        """The processor exposes all expected configuration attributes."""
        SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowercase__ , "do_resize" ) )
        self.assertTrue(hasattr(lowercase__ , "size" ) )
        self.assertTrue(hasattr(lowercase__ , "do_center_crop" ) )
        self.assertTrue(hasattr(lowercase__ , "center_crop" ) )
        self.assertTrue(hasattr(lowercase__ , "do_normalize" ) )
        self.assertTrue(hasattr(lowercase__ , "image_mean" ) )
        self.assertTrue(hasattr(lowercase__ , "image_std" ) )
        self.assertTrue(hasattr(lowercase__ , "do_convert_rgb" ) )

    def __lowerCamelCase ( self ):
        """`from_dict` honors defaults and keyword overrides for size/crop_size."""
        SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        SCREAMING_SNAKE_CASE_ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )

    def __lowerCamelCase ( self ):
        """Intentionally empty placeholder test."""
        pass

    def __lowerCamelCase ( self ):
        """Single and batched PIL inputs produce the expected output shapes."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase__ )
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE_ : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(lowercase__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def __lowerCamelCase ( self ):
        """Single and batched numpy inputs produce the expected output shapes."""
        SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase__ , numpify=lowercase__ )
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , np.ndarray )
        # Test not batched input
        SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ : List[str] = image_processing(lowercase__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def __lowerCamelCase ( self ):
        """Single and batched torch inputs produce the expected output shapes."""
        SCREAMING_SNAKE_CASE_ : str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE_ : List[str] = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase__ , torchify=lowercase__ )
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ : int = image_processing(lowercase__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase,unittest.TestCase ):
    """Tests for ChineseCLIPImageProcessor on 4-channel inputs (RGBA -> 3 channels).

    NOTE(review): obfuscation artifacts remain — all methods share the name
    ``__lowerCamelCase`` and bodies reference the undefined placeholder
    ``lowercase__``; the setUp assignments lost their attribute names
    (originally image_processor_tester / expected_encoded_image_num_channels).
    """

    _A = ChineseCLIPImageProcessor if is_vision_available() else None

    def __lowerCamelCase ( self ):
        """Create a 4-channel fixture helper; processed output should have 3 channels."""
        SCREAMING_SNAKE_CASE_ : int = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=lowercase__ )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = 3

    @property
    def __lowerCamelCase ( self ):
        """Kwargs for building the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def __lowerCamelCase ( self ):
        """The processor exposes all expected configuration attributes."""
        SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowercase__ , "do_resize" ) )
        self.assertTrue(hasattr(lowercase__ , "size" ) )
        self.assertTrue(hasattr(lowercase__ , "do_center_crop" ) )
        self.assertTrue(hasattr(lowercase__ , "center_crop" ) )
        self.assertTrue(hasattr(lowercase__ , "do_normalize" ) )
        self.assertTrue(hasattr(lowercase__ , "image_mean" ) )
        self.assertTrue(hasattr(lowercase__ , "image_std" ) )
        self.assertTrue(hasattr(lowercase__ , "do_convert_rgb" ) )

    def __lowerCamelCase ( self ):
        """Intentionally empty placeholder test."""
        pass

    def __lowerCamelCase ( self ):
        """4-channel PIL inputs are converted to the expected 3-channel shapes."""
        SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase__ )
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ : List[str] = image_processing(lowercase__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 68 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Bug fix: the lazy-module wiring below references `_import_structure`, but the
# obfuscation had collapsed every assignment into `snake_case_`, so the
# tokenizer entries never reached the structure dict and `_import_structure`
# was undefined at the `_LazyModule` call. Module keys are taken from the
# `from .tokenization_nllb*` imports in the TYPE_CHECKING branch.
from typing import TYPE_CHECKING  # used below; missing from the original imports

_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    # NOTE(review): the obfuscated original bound the proxy to a throwaway
    # variable; assigning into sys.modules matches the lazy-module convention
    # used by this codebase — confirm.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 705 |
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Return True if ``n``'s decimal digits are exactly 1-9, each used once.

    Renamed from the obfuscated ``__lowerCamelCase``: the search loop below
    calls ``is_9_pandigital``.
    """
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Project Euler 38: largest 1-9 pandigital concatenated product.

    Renamed from the obfuscated ``__lowerCamelCase``: the __main__ block calls
    ``solution``. For a 4-digit base n, concat(n, 2n) == n * 100002; for a
    3-digit base, concat(n, 2n, 3n) == n * 1002003 — so scanning bases from
    the top finds the largest pandigital candidate first.
    """
    for base_num in range(9_9_9_9, 4_9_9_9, -1):
        candidate = 1_0_0_0_0_2 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(3_3_3, 9_9, -1):
        candidate = 1_0_0_2_0_0_3 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
    # Print the Project Euler answer using the 3.8+ f-string `=` debug spec.
    print(F'''{solution() = }''')
| 68 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
# Module-level logger for this image processor.
snake_case_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
_A = ["pixel_values"]
    def __init__( self , lowercase__ = True , lowercase__ = None , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = True , lowercase__ = None , lowercase__ = True , lowercase__ = 1 / 255 , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
        """Store resize/crop/rescale/normalize settings with ImageNet mean/std defaults.

        NOTE(review): the obfuscation renamed every parameter to ``lowercase__``
        (duplicate parameter names are a SyntaxError) and collapsed assignment
        targets; the right-hand sides below show the intended names (do_resize,
        size, resample, do_center_crop, crop_size, do_rescale, rescale_factor,
        do_normalize, image_mean, image_std).
        """
        super().__init__(**lowercase__ )
        # defaults: resize the shorter edge to 256, then center-crop to 224x224
        SCREAMING_SNAKE_CASE_ : str = size if size is not None else {"shortest_edge": 256}
        SCREAMING_SNAKE_CASE_ : str = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        SCREAMING_SNAKE_CASE_ : Any = crop_size if crop_size is not None else {"height": 224, "width": 224}
        SCREAMING_SNAKE_CASE_ : Optional[int] = get_size_dict(lowercase__ , param_name="crop_size" )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = do_resize
        SCREAMING_SNAKE_CASE_ : Any = size
        SCREAMING_SNAKE_CASE_ : List[Any] = resample
        SCREAMING_SNAKE_CASE_ : List[str] = do_center_crop
        SCREAMING_SNAKE_CASE_ : Tuple = crop_size
        SCREAMING_SNAKE_CASE_ : List[str] = do_rescale
        SCREAMING_SNAKE_CASE_ : List[Any] = rescale_factor
        SCREAMING_SNAKE_CASE_ : Optional[int] = do_normalize
        SCREAMING_SNAKE_CASE_ : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ):
        """Resize so the shorter edge matches ``size["shortest_edge"]`` (aspect kept).

        NOTE(review): duplicate ``lowercase__`` parameter names (a SyntaxError)
        are an obfuscation artifact — originally (image, size, resample,
        data_format).
        """
        SCREAMING_SNAKE_CASE_ : str = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        if "shortest_edge" not in size:
            raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        SCREAMING_SNAKE_CASE_ : List[Any] = get_resize_output_image_size(lowercase__ , size=size["shortest_edge"] , default_to_square=lowercase__ )
        return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        """Center-crop to exactly ``size["height"] x size["width"]``.

        NOTE(review): duplicate ``lowercase__`` parameter names (a SyntaxError)
        are an obfuscation artifact — originally (image, size, data_format).
        """
        SCREAMING_SNAKE_CASE_ : List[Any] = get_size_dict(lowercase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
        return center_crop(lowercase__ , size=(size["height"], size["width"]) , data_format=lowercase__ , **lowercase__ )
    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ ):
        """Multiply pixel values by a scalar scale factor (e.g. 1/255).

        NOTE(review): duplicate ``lowercase__`` parameter names (a SyntaxError)
        are an obfuscation artifact — originally (image, scale, data_format).
        """
        return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        """Normalize with per-channel mean and std.

        NOTE(review): duplicate ``lowercase__`` parameter names (a SyntaxError)
        are an obfuscation artifact — originally (image, mean, std, data_format).
        """
        return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )
    def __lowerCamelCase ( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
        """Preprocess a batch of images: resize, center-crop, rescale, normalize.

        Each step is gated by its ``do_*`` flag; any argument left as ``None``
        falls back to the corresponding attribute configured on the processor.
        Returns a ``BatchFeature`` holding ``pixel_values``.

        NOTE(review): the parameter list was mangled into duplicate
        ``lowercase__`` placeholders and the body references unbound names
        (``do_resize``, ``size``, ``images``, ...) — confirm against the
        upstream image processor before relying on this method.
        """
        # Resolve every option against the processor's configured defaults.
        SCREAMING_SNAKE_CASE_ : List[Any] = do_resize if do_resize is not None else self.do_resize
        SCREAMING_SNAKE_CASE_ : Optional[Any] = size if size is not None else self.size
        SCREAMING_SNAKE_CASE_ : List[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        SCREAMING_SNAKE_CASE_ : str = resample if resample is not None else self.resample
        SCREAMING_SNAKE_CASE_ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
        SCREAMING_SNAKE_CASE_ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(lowercase__ , param_name="crop_size" )
        SCREAMING_SNAKE_CASE_ : Any = do_rescale if do_rescale is not None else self.do_rescale
        SCREAMING_SNAKE_CASE_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
        SCREAMING_SNAKE_CASE_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
        SCREAMING_SNAKE_CASE_ : List[str] = image_mean if image_mean is not None else self.image_mean
        SCREAMING_SNAKE_CASE_ : List[str] = image_std if image_std is not None else self.image_std
        SCREAMING_SNAKE_CASE_ : List[Any] = make_list_of_images(lowercase__ )
        if not valid_images(lowercase__ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # Validate that every enabled step has the arguments it needs.
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        SCREAMING_SNAKE_CASE_ : Optional[int] = [to_numpy_array(lowercase__ ) for image in images]
        if do_resize:
            SCREAMING_SNAKE_CASE_ : str = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images]
        if do_center_crop:
            SCREAMING_SNAKE_CASE_ : List[str] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
        if do_rescale:
            SCREAMING_SNAKE_CASE_ : Optional[int] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
        if do_normalize:
            SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
        # Convert every image to the requested channel-dimension layout.
        SCREAMING_SNAKE_CASE_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
        SCREAMING_SNAKE_CASE_ : int = {"pixel_values": images}
        return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
    def __lowerCamelCase ( self , lowercase__ , lowercase__ = None ):
        """Turn model ``outputs.logits`` into per-image semantic segmentation maps.

        When target sizes are given, each logits map is bilinearly interpolated
        to its (height, width) before the per-pixel argmax; otherwise the
        argmax is taken at the logits' native resolution.

        NOTE(review): parameters are mangled duplicates (likely ``outputs,
        target_sizes``) and the body references those unbound original names —
        confirm upstream.
        """
        SCREAMING_SNAKE_CASE_ : int = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(lowercase__ ) != len(lowercase__ ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(lowercase__ ):
                SCREAMING_SNAKE_CASE_ : Optional[Any] = target_sizes.numpy()
            SCREAMING_SNAKE_CASE_ : Optional[int] = []
            for idx in range(len(lowercase__ ) ):
                SCREAMING_SNAKE_CASE_ : str = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=lowercase__ )
                # Class id per pixel at the target resolution.
                SCREAMING_SNAKE_CASE_ : int = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(lowercase__ )
        else:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = logits.argmax(dim=1 )
            SCREAMING_SNAKE_CASE_ : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 706 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Dataset reader that materializes a PySpark DataFrame as a dataset.

    NOTE(review): names are obfuscated; structure matches ``datasets``'
    SparkDatasetReader — confirm upstream.
    """
    def __init__( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = True , lowercase__ = None , lowercase__ = False , lowercase__ = None , lowercase__ = True , lowercase__ = "arrow" , **lowercase__ , ):
        """Store reader options and build the underlying Spark dataset builder.

        NOTE(review): parameters are mangled duplicates (likely ``df, split,
        features, streaming, cache_dir, keep_in_memory, working_dir,
        load_from_cache_file, file_format``) and the body references unbound
        names — confirm upstream.
        """
        super().__init__(
            split=lowercase__ , features=lowercase__ , cache_dir=lowercase__ , keep_in_memory=lowercase__ , streaming=lowercase__ , **lowercase__ , )
        # Whether a cached build may be reused, and the on-disk file format.
        SCREAMING_SNAKE_CASE_ : Any = load_from_cache_file
        SCREAMING_SNAKE_CASE_ : Optional[int] = file_format
        SCREAMING_SNAKE_CASE_ : List[Any] = Spark(
            df=lowercase__ , features=lowercase__ , cache_dir=lowercase__ , working_dir=lowercase__ , **lowercase__ , )
    def __lowerCamelCase ( self ):
        """Build and return the dataset (streaming or fully prepared).

        Streaming mode skips preparation entirely; otherwise the builder
        downloads/prepares the data (forcing a re-download when the cache is
        disabled) and returns the materialized split.
        """
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        # FORCE_REDOWNLOAD bypasses any previously cached build.
        SCREAMING_SNAKE_CASE_ : Optional[int] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=lowercase__ , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
| 68 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __lowerCamelCase ( ) -> None:
    """Entry point for the ``transformers-cli`` command-line tool.

    Builds the top-level argument parser, registers every sub-command on a
    shared sub-parser, then dispatches to the factory the selected sub-command
    stored on ``args.func`` and runs the resulting service. Exits with status
    1 when no sub-command was given.

    BUG FIX: the original overwrote a single mangled variable with both the
    parser and the sub-parser, passed an undefined name (``lowercase_``) to
    every ``register_subcommand`` call, and the ``__main__`` guard called an
    undefined ``main()`` — all NameErrors at runtime. Proper local names are
    restored below.
    """
    parser = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
    # Every sub-command attaches itself to this shared sub-parser.
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        # No sub-command selected: show usage and fail.
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
    __lowerCamelCase()
| 707 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
snake_case_ = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
snake_case_ = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = 
datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
snake_case_ = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    """Exact-match metric: percentage of predictions equal to their reference.

    NOTE(review): names are obfuscated; the decorator references
    ``_DESCRIPTION``/``_KWARGS_DESCRIPTION``/``_CITATION`` constants the
    mangled module no longer defines under those names — confirm upstream.
    """
    def __lowerCamelCase ( self ):
        """Declare the metric's description, citation and string input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) , reference_urls=[] , )
    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=False , ):
        """Compute the exact-match rate (0.0-100.0) between predictions and references.

        Optionally strips user-supplied regex matches, lowercases, and removes
        punctuation and/or digits from both sides before comparing.

        NOTE(review): parameters are mangled duplicates (likely ``predictions,
        references, regexes_to_ignore, ignore_case, ignore_punctuation,
        ignore_numbers``) and the body references those unbound original
        names — confirm upstream.
        """
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([re.sub(lowercase__ , "" , lowercase__ ) for x in predictions] )
                SCREAMING_SNAKE_CASE_ : List[Any] = np.array([re.sub(lowercase__ , "" , lowercase__ ) for x in references] )
        else:
            SCREAMING_SNAKE_CASE_ : int = np.asarray(lowercase__ )
            SCREAMING_SNAKE_CASE_ : Any = np.asarray(lowercase__ )
        if ignore_case:
            SCREAMING_SNAKE_CASE_ : Dict = np.char.lower(lowercase__ )
            SCREAMING_SNAKE_CASE_ : List[Any] = np.char.lower(lowercase__ )
        if ignore_punctuation:
            # Single-pass character deletion via str.translate tables.
            SCREAMING_SNAKE_CASE_ : Optional[int] = string.punctuation.maketrans("" , "" , string.punctuation )
            SCREAMING_SNAKE_CASE_ : int = np.char.translate(lowercase__ , table=lowercase__ )
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.char.translate(lowercase__ , table=lowercase__ )
        if ignore_numbers:
            SCREAMING_SNAKE_CASE_ : Optional[int] = string.digits.maketrans("" , "" , string.digits )
            SCREAMING_SNAKE_CASE_ : Dict = np.char.translate(lowercase__ , table=lowercase__ )
            SCREAMING_SNAKE_CASE_ : int = np.char.translate(lowercase__ , table=lowercase__ )
        SCREAMING_SNAKE_CASE_ : str = predictions == references
        return {"exact_match": np.mean(lowercase__ ) * 100}
| 68 | 0 |
def __lowerCamelCase(equation1: list[float], equation2: list[float]) -> tuple[float, float]:
    """Solve a 2x2 linear system with Cramer's rule.

    Each equation is the coefficient triple ``(a, b, c)`` of ``a*x + b*y = c``.
    Returns the unique solution ``(x, y)``; returns ``(0.0, 0.0)`` for the
    trivial homogeneous case.

    BUG FIX: the original declared both parameters under the same mangled name
    (a SyntaxError) and referenced unbound identifiers (``equationa``, ``aa``,
    ``ba`` ...); the standard algorithm is restored with distinct names.

    Raises:
        ValueError: if either triple does not have length 3, if all four
            x/y coefficients are zero, if the system is inconsistent
            (no solution), or dependent (infinitely many solutions).
    """
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            # Dependent equations: every point on the line is a solution.
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    if determinant_x == determinant_y == 0:
        # Trivial solution: the homogeneous right-hand side forces the origin.
        return (0.0, 0.0)
    # Non-trivial solution (consistent system)
    return (determinant_x / determinant, determinant_y / determinant)
| 708 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# NOTE(review): obfuscation collapsed several distinct module constants
# (logger, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) onto the single name `snake_case_`,
# so each assignment below overwrites the previous one and the class
# attributes later in the file reference names that are no longer defined —
# confirm against the upstream tokenizer module.
snake_case_ = logging.get_logger(__name__)
snake_case_ = {'vocab_file': 'spiece.model'}
snake_case_ = {
    'vocab_file': {
        'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
    }
}
# Maximum positional-embedding sizes per checkpoint (all 2048).
snake_case_ = {
    'AI-Sweden/gpt-sw3-126m': 2_0_4_8,
    'AI-Sweden/gpt-sw3-350m': 2_0_4_8,
    'AI-Sweden/gpt-sw3-1.6b': 2_0_4_8,
    'AI-Sweden/gpt-sw3-6.7b': 2_0_4_8,
    'AI-Sweden/gpt-sw3-20b': 2_0_4_8,
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """SentencePiece-based tokenizer for the GPT-SW3 model family.

    NOTE(review): the class/base names are obfuscated and most method
    parameter lists were mangled into duplicate ``lowercase__`` placeholders;
    several bodies reference names the mangling unbound (``name_or_path``,
    ``state``, ``text``, ``tokens`` ...). Structure matches the upstream
    GPTSw3Tokenizer — confirm before relying on any method here.
    """
    # NOTE(review): all four class attributes were mangled onto the single
    # name `_A`, so each assignment overwrites the previous one.
    _A = VOCAB_FILES_NAMES
    _A = PRETRAINED_VOCAB_FILES_MAP
    _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _A = ["input_ids", "attention_mask"]
    def __init__( self , lowercase__ , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__ = None , **lowercase__ , ):
        """Load the SentencePiece model and pick per-checkpoint special tokens.

        Special-token defaults differ for the "gpt-sw3-7b" checkpoint (pad
        falls back to unk, bos to eos); all other checkpoints use "<pad>" and
        "<s>". Also prepares the whitespace-normalization set and the
        non-printing-character regex used by ``preprocess_text``.
        """
        SCREAMING_SNAKE_CASE_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
        SCREAMING_SNAKE_CASE_ : Dict = kwargs.get("name_or_path" )
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored" )
            SCREAMING_SNAKE_CASE_ : str = "None"
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        SCREAMING_SNAKE_CASE_ : List[Any] = "<|endoftext|>" if eos_token is None else eos_token
        SCREAMING_SNAKE_CASE_ : Dict = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            SCREAMING_SNAKE_CASE_ : Tuple = unk_token if pad_token is None else pad_token
            SCREAMING_SNAKE_CASE_ : Optional[Any] = eos_token if bos_token is None else bos_token
        else:
            SCREAMING_SNAKE_CASE_ : int = "<pad>" if pad_token is None else pad_token
            SCREAMING_SNAKE_CASE_ : Any = "<s>" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=lowercase__ , remove_space=lowercase__ , keep_accents=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , **lowercase__ , )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = do_lower_case
        SCREAMING_SNAKE_CASE_ : Optional[int] = remove_space
        SCREAMING_SNAKE_CASE_ : int = keep_accents
        SCREAMING_SNAKE_CASE_ : List[Any] = vocab_file
        SCREAMING_SNAKE_CASE_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(lowercase__ )
        # Used for whitespace normalization in input texts
        # fmt : off
        SCREAMING_SNAKE_CASE_ : int = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        SCREAMING_SNAKE_CASE_ : List[str] = re.compile(
            F"[{''.join(map(lowercase__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]" )
    def __getstate__( self ):
        """Drop the unpicklable SentencePiece processor from the pickled state."""
        SCREAMING_SNAKE_CASE_ : Tuple = self.__dict__.copy()
        SCREAMING_SNAKE_CASE_ : Dict = None
        return state
    def __setstate__( self , lowercase__ ):
        """Restore pickled state and re-load the SentencePiece model from disk."""
        SCREAMING_SNAKE_CASE_ : List[str] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
        SCREAMING_SNAKE_CASE_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def __lowerCamelCase ( self ):
        """Size of the SentencePiece vocabulary."""
        return len(self.sp_model )
    def __lowerCamelCase ( self , lowercase__ ):
        """Clean raw text: drop non-printing chars, normalize whitespace, apply NFC."""
        SCREAMING_SNAKE_CASE_ : List[str] = self.non_printing_characters_re.sub("" , lowercase__ )
        # Normalize whitespaces
        SCREAMING_SNAKE_CASE_ : List[str] = "".join([char if char not in self.whitespaces else " " for char in text] )
        # NFC Unicode normalization
        SCREAMING_SNAKE_CASE_ : List[Any] = unicodedata.normalize("NFC" , lowercase__ )
        return text
    def __lowerCamelCase ( self , lowercase__ , **lowercase__ ):
        """Tokenize: preprocess the text, then encode it with SentencePiece."""
        SCREAMING_SNAKE_CASE_ : Dict = self.preprocess_text(lowercase__ )
        return self.sp_model.encode(lowercase__ , out_type=lowercase__ )
    def __lowerCamelCase ( self , lowercase__ ):
        """Map a token string to its vocabulary id."""
        return self.sp_model.PieceToId(lowercase__ )
    def __lowerCamelCase ( self , lowercase__ ):
        """Map a vocabulary id back to its token string."""
        return self.sp_model.IdToPiece(lowercase__ )
    @staticmethod
    def __lowerCamelCase ( lowercase__ ):
        """Identity clean-up hook: SentencePiece output needs no extra processing."""
        return out_string
    def __lowerCamelCase ( self , lowercase__ ):
        """Join sub-tokens back into a string, decoding special tokens verbatim."""
        SCREAMING_SNAKE_CASE_ : Optional[Any] = []
        SCREAMING_SNAKE_CASE_ : Any = ""
        SCREAMING_SNAKE_CASE_ : Dict = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(lowercase__ ) + token
                SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
                SCREAMING_SNAKE_CASE_ : int = []
            else:
                current_sub_tokens.append(lowercase__ )
                SCREAMING_SNAKE_CASE_ : List[str] = False
        out_string += self.sp_model.decode(lowercase__ )
        return out_string
    def __lowerCamelCase ( self ):
        """Return the token-to-id mapping, including tokens added after loading."""
        SCREAMING_SNAKE_CASE_ : Optional[int] = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __lowerCamelCase ( self , lowercase__ , lowercase__ = None ):
        """Write the serialized SentencePiece model file into ``save_directory``."""
        if not os.path.isdir(lowercase__ ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        SCREAMING_SNAKE_CASE_ : Any = os.path.join(
            lowercase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowercase__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowercase__ , "wb" ) as fi:
                SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.sp_model.serialized_model_proto()
                fi.write(lowercase__ )
        return (out_vocab_file,)
    def __lowerCamelCase ( self , lowercase__ , lowercase__ = False ):
        """Fast encode: preprocess + SentencePiece encode, optionally returning a torch tensor."""
        if isinstance(lowercase__ , lowercase__ ):
            SCREAMING_SNAKE_CASE_ : List[Any] = self.preprocess_text(lowercase__ )
            SCREAMING_SNAKE_CASE_ : Any = self.sp_model.encode(lowercase__ )
        else:
            SCREAMING_SNAKE_CASE_ : str = [self.preprocess_text(lowercase__ ) for t in text]
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.sp_model.encode(lowercase__ )
        if return_tensors is True or return_tensors == "pt":
            SCREAMING_SNAKE_CASE_ : str = torch.tensor(lowercase__ )
        return token_ids
    def __lowerCamelCase ( self , lowercase__ ):
        """Fast decode straight through SentencePiece."""
        return self.sp_model.decode(lowercase__ )
    def __lowerCamelCase ( self , lowercase__ ):
        """Serialize a chat history into the GPT-SW3 prompt format and encode it."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = [F"User: {text}" if is_user else F"Bot: {text}" for is_user, text in conversation.iter_texts()]
        SCREAMING_SNAKE_CASE_ : List[str] = (
            F"{self.eos_token}{self.bos_token}" + F"{self.bos_token}".join(lowercase__ ) + F"{self.bos_token}Bot:"
        )
        return self.encode(text=lowercase__ )
| 68 | 0 |
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
snake_case_ = logging.get_logger(__name__)
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
    """Collect the warning bodies found in one CI artifact.

    Parses pytest ``warnings.txt`` output — from a plain directory when the
    module-level ``from_gh`` flag is set, otherwise from a zip archive — and
    keeps only warnings whose type matches one of the targets.

    NOTE(review): the two parameters are mangled duplicates (likely
    ``artifact_path, targets``) and the body references names the mangling
    unbound (``targets``, ``selected_warnings``, ``buffer``, ``from_gh``,
    ``artifact_path``) — confirm upstream before use.
    """
    SCREAMING_SNAKE_CASE_ : List[str] = set()
    SCREAMING_SNAKE_CASE_ : str = []
    def parse_line(SCREAMING_SNAKE_CASE_ : Optional[int] ):
        # Accumulate indented continuation lines into one warning body.
        for line in fp:
            if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
                SCREAMING_SNAKE_CASE_ : str = line.decode("UTF-8" )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" " ):
                # process a single warning and move it to `selected_warnings`.
                if len(SCREAMING_SNAKE_CASE_ ) > 0:
                    SCREAMING_SNAKE_CASE_ : List[Any] = "\n".join(SCREAMING_SNAKE_CASE_ )
                    # Only keep the warnings specified in `targets`
                    if any(F": {x}: " in warning for x in targets ):
                        selected_warnings.add(SCREAMING_SNAKE_CASE_ )
                    buffer.clear()
                continue
            else:
                SCREAMING_SNAKE_CASE_ : Optional[int] = line.strip()
                buffer.append(SCREAMING_SNAKE_CASE_ )
    if from_gh:
        for filename in os.listdir(SCREAMING_SNAKE_CASE_ ):
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(SCREAMING_SNAKE_CASE_ ) as fp:
                    parse_line(SCREAMING_SNAKE_CASE_ )
    else:
        try:
            with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(SCREAMING_SNAKE_CASE_ ) as fp:
                            parse_line(SCREAMING_SNAKE_CASE_ )
        except Exception:
            # Best effort: a corrupt artifact is logged and skipped, not fatal.
            logger.warning(
                F"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." )
    return selected_warnings
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple:
    """Aggregate target warnings across every artifact in a directory.

    NOTE(review): parameters are mangled duplicates (likely ``artifact_dir,
    targets``) and the body references ``paths``/``selected_warnings`` which
    the mangling unbound — confirm upstream.
    """
    SCREAMING_SNAKE_CASE_ : Optional[Any] = set()
    # When collecting from GitHub, artifacts are plain directories, not zips.
    SCREAMING_SNAKE_CASE_ : List[Any] = [os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for p in os.listdir(SCREAMING_SNAKE_CASE_ ) if (p.endswith(".zip" ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
    return selected_warnings
if __name__ == "__main__":
    def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[int]:
        """Split a comma-separated CLI argument into a list of strings.

        NOTE(review): the body references ``values`` which the name-mangling
        unbound — confirm upstream (original was ``list_str(values)``).
        """
        return values.split("," )
    # NOTE(review): the mangling below binds parser/args/results all to
    # `snake_case_`, while later lines reference the original names
    # (`parser`, `args`, `from_gh`, `artifacts`, `selected_warnings`).
    snake_case_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    parser.add_argument(
        '--output_dir',
        type=str,
        required=True,
        help='Where to store the downloaded artifacts and other result files.',
    )
    parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    # optional parameters
    parser.add_argument(
        '--targets',
        default='DeprecationWarning,UserWarning,FutureWarning',
        type=list_str,
        help='Comma-separated list of target warning(s) which we want to extract.',
    )
    parser.add_argument(
        '--from_gh',
        action='store_true',
        help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
    )
    snake_case_ = parser.parse_args()
    snake_case_ = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)
        # get download links
        snake_case_ = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)
        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print('=' * 8_0)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)
    # extract warnings from artifacts
    snake_case_ = extract_warnings(args.output_dir, args.targets)
    snake_case_ = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 709 |
'''simple docstring'''
import re
from filelock import FileLock
# Sentence tokenization is optional: record whether nltk is importable and,
# when it is, make sure the "punkt" tokenizer data is present. The FileLock
# guards against concurrent downloads from parallel workers.
try:
    import nltk
    snake_case_ = True
except (ImportError, ModuleNotFoundError):
    snake_case_ = False
if NLTK_AVAILABLE:  # NOTE(review): mangling bound the flag to `snake_case_`; `NLTK_AVAILABLE` is undefined here — confirm upstream
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str ) -> str:
    """Re-insert real newlines between the sentences of a Pegasus summary.

    Strips the Pegasus ``<n>`` newline token, then splits the text into
    sentences with nltk's tokenizer and joins them with "\\n".

    Raises:
        AssertionError: if nltk is not installed (NLTK_AVAILABLE is False).

    BUG FIX: the original called ``re.sub`` without binding its return value
    (strings are immutable, so the substitution was a no-op and "<n>" tokens
    leaked into the output); the cleaned text is now bound before tokenizing.
    """
    SCREAMING_SNAKE_CASE_ = re.sub("<n>" , "" , SCREAMING_SNAKE_CASE_ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE_ ) )
| 68 | 0 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
    """Seq2Seq trainer variant that post-processes predictions against raw examples.

    NOTE(review): names are obfuscated and the method bodies reference ``_a``
    plus other identifiers the mangling unbound (``gen_kwargs``,
    ``eval_dataset``, ``output``, ``metrics`` ...) — confirm against the
    upstream QuestionAnsweringSeq2SeqTrainer before relying on this class.
    """
    def __init__( self , *lowercase__ , lowercase__=None , lowercase__=None , **lowercase__ ):
        """Store the raw eval examples and the post-processing callback."""
        super().__init__(*_a , **_a )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = eval_examples
        SCREAMING_SNAKE_CASE_ : int = post_process_function
    def __lowerCamelCase ( self , lowercase__ = None , lowercase__=None , lowercase__ = None , lowercase__ = "eval" , **lowercase__ , ):
        """Run evaluation, post-process predictions, compute and log metrics.

        ``compute_metrics`` is temporarily disabled during the prediction loop
        so metrics are computed once on the post-processed predictions; every
        metric key is prefixed with the metric-key prefix and speed metrics
        are merged in before logging.
        """
        SCREAMING_SNAKE_CASE_ : Tuple = gen_kwargs.copy()
        # Generation length/beam count fall back to the training args' defaults.
        SCREAMING_SNAKE_CASE_ : int = (
            gen_kwargs["""max_length"""] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
        )
        SCREAMING_SNAKE_CASE_ : List[Any] = (
            gen_kwargs["""num_beams"""] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
        )
        SCREAMING_SNAKE_CASE_ : str = gen_kwargs
        SCREAMING_SNAKE_CASE_ : Optional[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
        SCREAMING_SNAKE_CASE_ : List[str] = self.get_eval_dataloader(_a )
        SCREAMING_SNAKE_CASE_ : Any = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        SCREAMING_SNAKE_CASE_ : Optional[int] = self.compute_metrics
        SCREAMING_SNAKE_CASE_ : Dict = None
        SCREAMING_SNAKE_CASE_ : Any = time.time()
        SCREAMING_SNAKE_CASE_ : List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = eval_loop(
                _a , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , )
        finally:
            # Always restore the callback, even if the eval loop raised.
            SCREAMING_SNAKE_CASE_ : List[Any] = compute_metrics
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
        if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                _a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            SCREAMING_SNAKE_CASE_ : Dict = self.post_process_function(_a , _a , _a )
            SCREAMING_SNAKE_CASE_ : List[Any] = self.compute_metrics(_a )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F"{metric_key_prefix}_" ):
                    SCREAMING_SNAKE_CASE_ : Optional[int] = metrics.pop(_a )
            metrics.update(output.metrics )
        else:
            SCREAMING_SNAKE_CASE_ : Optional[Any] = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(_a )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        SCREAMING_SNAKE_CASE_ : Any = self.callback_handler.on_evaluate(self.args , self.state , self.control , _a )
        return metrics
    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__=None , lowercase__ = "test" , **lowercase__ ):
        """Run prediction, post-process, and return a PredictionOutput with metrics.

        Mirrors the evaluate path: metric computation is deferred until after
        the post-processing step, and metric keys get the prefix applied.
        """
        SCREAMING_SNAKE_CASE_ : List[str] = gen_kwargs.copy()
        SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_test_dataloader(_a )
        # Temporarily disable metric computation, we will do it in the loop here.
        SCREAMING_SNAKE_CASE_ : List[str] = self.compute_metrics
        SCREAMING_SNAKE_CASE_ : str = None
        SCREAMING_SNAKE_CASE_ : List[Any] = time.time()
        SCREAMING_SNAKE_CASE_ : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            SCREAMING_SNAKE_CASE_ : Optional[Any] = eval_loop(
                _a , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , )
        finally:
            SCREAMING_SNAKE_CASE_ : Any = compute_metrics
        SCREAMING_SNAKE_CASE_ : Dict = self.args.eval_batch_size * self.args.world_size
        if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                _a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        SCREAMING_SNAKE_CASE_ : List[str] = self.post_process_function(_a , _a , _a , "predict" )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.compute_metrics(_a )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F"{metric_key_prefix}_" ):
                SCREAMING_SNAKE_CASE_ : int = metrics.pop(_a )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_a )
| 710 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Builds a small LayoutLMv3 configuration together with random text ids,
    bounding boxes, pixel values, masks and labels for the TF model tests below.

    NOTE(review): this file is machine-mangled — every ``__init__`` parameter is
    literally named ``lowercase__`` (duplicate argument names are a SyntaxError
    in Python) and most locals are named ``SCREAMING_SNAKE_CASE_``. The intended
    names can be read off the attribute assignments in the bodies; confirm
    against the upstream test file before relying on this code.
    """

    def __init__( self , lowercase__ , lowercase__=2 , lowercase__=3 , lowercase__=4 , lowercase__=2 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=99 , lowercase__=36 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=16 , lowercase__=2 , lowercase__=0.02 , lowercase__=6 , lowercase__=6 , lowercase__=3 , lowercase__=4 , lowercase__=None , lowercase__=1000 , ):
        """Record the tester hyper-parameters and derive the combined
        text + image sequence length used by the shape assertions below."""
        SCREAMING_SNAKE_CASE_ : List[str] = parent
        SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
        SCREAMING_SNAKE_CASE_ : Dict = num_channels
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_size
        SCREAMING_SNAKE_CASE_ : Optional[int] = patch_size
        SCREAMING_SNAKE_CASE_ : str = is_training
        SCREAMING_SNAKE_CASE_ : str = use_input_mask
        SCREAMING_SNAKE_CASE_ : Any = use_token_type_ids
        SCREAMING_SNAKE_CASE_ : int = use_labels
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size
        SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_size
        SCREAMING_SNAKE_CASE_ : Dict = num_hidden_layers
        SCREAMING_SNAKE_CASE_ : Any = num_attention_heads
        SCREAMING_SNAKE_CASE_ : Optional[Any] = intermediate_size
        SCREAMING_SNAKE_CASE_ : str = hidden_act
        SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
        SCREAMING_SNAKE_CASE_ : Dict = type_vocab_size
        SCREAMING_SNAKE_CASE_ : List[Any] = type_sequence_label_size
        SCREAMING_SNAKE_CASE_ : Tuple = initializer_range
        SCREAMING_SNAKE_CASE_ : List[str] = coordinate_size
        SCREAMING_SNAKE_CASE_ : Optional[Any] = shape_size
        SCREAMING_SNAKE_CASE_ : List[str] = num_labels
        SCREAMING_SNAKE_CASE_ : Optional[int] = num_choices
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = scope
        SCREAMING_SNAKE_CASE_ : Dict = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_seq_length
        SCREAMING_SNAKE_CASE_ : Tuple = (image_size // patch_size) ** 2 + 1
        SCREAMING_SNAKE_CASE_ : Optional[int] = self.text_seq_length + self.image_seq_length

    def __lowerCamelCase ( self ):
        """Create a LayoutLMv3 config plus random inputs (ids, bboxes, pixel
        values, masks, labels), legalising each bbox so x1 <= x2 and y1 <= y2."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        SCREAMING_SNAKE_CASE_ : Dict = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                # NOTE(review): the swap temporaries below were mangled away —
                # `tmp_coordinate` is never assigned, so reaching either branch
                # raises NameError at runtime.
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 3]
                    SCREAMING_SNAKE_CASE_ : str = bbox[i, j, 1]
                    SCREAMING_SNAKE_CASE_ : Dict = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    SCREAMING_SNAKE_CASE_ : List[Any] = bbox[i, j, 2]
                    SCREAMING_SNAKE_CASE_ : Dict = bbox[i, j, 0]
                    SCREAMING_SNAKE_CASE_ : Tuple = tmp_coordinate
        SCREAMING_SNAKE_CASE_ : Dict = tf.constant(lowercase__ )
        SCREAMING_SNAKE_CASE_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        SCREAMING_SNAKE_CASE_ : Dict = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE_ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
        SCREAMING_SNAKE_CASE_ : List[str] = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        SCREAMING_SNAKE_CASE_ : Dict = None
        SCREAMING_SNAKE_CASE_ : Tuple = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        SCREAMING_SNAKE_CASE_ : str = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Run the base model on text+image, text-only and image-only inputs
        and check the last_hidden_state shapes."""
        SCREAMING_SNAKE_CASE_ : str = TFLayoutLMvaModel(config=lowercase__ )
        # text + image
        SCREAMING_SNAKE_CASE_ : int = model(lowercase__ , pixel_values=lowercase__ , training=lowercase__ )
        SCREAMING_SNAKE_CASE_ : str = model(
            lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , training=lowercase__ , )
        SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , training=lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        SCREAMING_SNAKE_CASE_ : Tuple = model(lowercase__ , training=lowercase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        SCREAMING_SNAKE_CASE_ : int = model({"pixel_values": pixel_values} , training=lowercase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Check the logits shape of TFLayoutLMvaForSequenceClassification."""
        SCREAMING_SNAKE_CASE_ : Any = self.num_labels
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFLayoutLMvaForSequenceClassification(config=lowercase__ )
        SCREAMING_SNAKE_CASE_ : List[str] = model(
            lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , training=lowercase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Check the logits shape of TFLayoutLMvaForTokenClassification."""
        SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
        SCREAMING_SNAKE_CASE_ : Any = TFLayoutLMvaForTokenClassification(config=lowercase__ )
        SCREAMING_SNAKE_CASE_ : int = model(
            lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , training=lowercase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Check start/end logits shapes of TFLayoutLMvaForQuestionAnswering."""
        SCREAMING_SNAKE_CASE_ : Tuple = 2
        SCREAMING_SNAKE_CASE_ : List[Any] = TFLayoutLMvaForQuestionAnswering(config=lowercase__ )
        SCREAMING_SNAKE_CASE_ : int = model(
            lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , training=lowercase__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __lowerCamelCase ( self ):
        """Return (config, inputs_dict) for the common test mixin."""
        SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_config_and_inputs()
        # NOTE(review): all eight unpack targets below are the same mangled
        # name, so only the last element of config_and_inputs survives and the
        # names used in the dict (input_ids, bbox, ...) are undefined — this
        # raises NameError at runtime.
        ((SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_), (SCREAMING_SNAKE_CASE_)) : Any = config_and_inputs
        SCREAMING_SNAKE_CASE_ : Optional[Any] = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase,_UpperCAmelCase,unittest.TestCase ):
    """Common TF model tests for the LayoutLMv3 family.

    NOTE(review): machine-mangled code. The five ``_A`` class attributes below
    overwrite one another (only the last assignment survives), every test
    method is named ``__lowerCamelCase`` so later definitions shadow earlier
    ones, and the mixin bases ``_UpperCAmelCase`` are not defined in this file.
    Restore the upstream names before running.
    """

    # Originally distinct attributes (all_model_classes, pipeline mapping and
    # three boolean test switches) — here they all rebind the same name `_A`.
    _A = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    _A = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    _A = False
    _A = False
    _A = False

    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        """Pipeline-test filter hook: unconditionally returns True here."""
        return True

    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__=False ):
        """Copy `inputs_dict`, tile the tensors for multiple-choice models and,
        when `return_labels` is set, attach dummy labels shaped for the model
        class being tested."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = copy.deepcopy(lowercase__ )
        if model_class in get_values(lowercase__ ):
            SCREAMING_SNAKE_CASE_ : str = {
                k: tf.tile(tf.expand_dims(lowercase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(lowercase__ , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(lowercase__ ):
                SCREAMING_SNAKE_CASE_ : Tuple = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(lowercase__ ):
                SCREAMING_SNAKE_CASE_ : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                SCREAMING_SNAKE_CASE_ : List[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(lowercase__ ):
                SCREAMING_SNAKE_CASE_ : List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(lowercase__ ):
                SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict

    def __lowerCamelCase ( self ):
        """setUp: instantiate the model tester and the config tester."""
        SCREAMING_SNAKE_CASE_ : List[str] = TFLayoutLMvaModelTester(self )
        SCREAMING_SNAKE_CASE_ : int = ConfigTester(self , config_class=lowercase__ , hidden_size=37 )

    def __lowerCamelCase ( self ):
        """Run the common configuration tests."""
        self.config_tester.run_common_tests()

    def __lowerCamelCase ( self ):
        """Check loss computation for every model class via kwargs, masked
        labels, dict inputs and tuple inputs."""
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE_ : int = model_class(lowercase__ )
            if getattr(lowercase__ , "hf_compute_loss" , lowercase__ ):
                # The number of elements in the loss should be the same as the number of elements in the label
                SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(inputs_dict.copy() , lowercase__ , return_labels=lowercase__ )
                SCREAMING_SNAKE_CASE_ : List[Any] = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowercase__ )[0]
                ]
                SCREAMING_SNAKE_CASE_ : Any = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(inputs_dict.copy() , lowercase__ , return_labels=lowercase__ )
                SCREAMING_SNAKE_CASE_ : List[str] = prepared_for_class.pop("input_ids" )
                SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase__ , **lowercase__ )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                SCREAMING_SNAKE_CASE_ : List[Any] = self._prepare_for_class(inputs_dict.copy() , lowercase__ , return_labels=lowercase__ )
                SCREAMING_SNAKE_CASE_ : int = prepared_for_class.pop("input_ids" )
                if "labels" in prepared_for_class:
                    SCREAMING_SNAKE_CASE_ : str = prepared_for_class["labels"].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        SCREAMING_SNAKE_CASE_ : str = -100
                        SCREAMING_SNAKE_CASE_ : str = tf.convert_to_tensor(lowercase__ )
                        SCREAMING_SNAKE_CASE_ : Optional[int] = model(lowercase__ , **lowercase__ )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                SCREAMING_SNAKE_CASE_ : List[Any] = self._prepare_for_class(inputs_dict.copy() , lowercase__ , return_labels=lowercase__ )
                SCREAMING_SNAKE_CASE_ : Optional[int] = model(lowercase__ )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                SCREAMING_SNAKE_CASE_ : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowercase__ , return_labels=lowercase__ )
                # Get keys that were added with the _prepare_for_class function
                SCREAMING_SNAKE_CASE_ : int = prepared_for_class.keys() - inputs_dict.keys()
                SCREAMING_SNAKE_CASE_ : Optional[int] = inspect.signature(model.call ).parameters
                SCREAMING_SNAKE_CASE_ : Tuple = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                SCREAMING_SNAKE_CASE_ : List[Any] = {0: "input_ids"}
                for label_key in label_keys:
                    SCREAMING_SNAKE_CASE_ : Optional[int] = signature_names.index(lowercase__ )
                    SCREAMING_SNAKE_CASE_ : List[Any] = label_key
                SCREAMING_SNAKE_CASE_ : List[str] = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                SCREAMING_SNAKE_CASE_ : List[str] = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    SCREAMING_SNAKE_CASE_ : List[str] = prepared_for_class[value]
                SCREAMING_SNAKE_CASE_ : List[Any] = tuple(lowercase__ )
                # Send to model
                SCREAMING_SNAKE_CASE_ : int = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

    def __lowerCamelCase ( self ):
        """Test the base model."""
        # NOTE(review): all eight unpack targets below are the same mangled
        # name, so only the last element of the prepared inputs survives.
        (
            (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ),
        ) : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )

    def __lowerCamelCase ( self ):
        """Test the base model with each position-embedding type."""
        (
            (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ),
        ) : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            SCREAMING_SNAKE_CASE_ : List[str] = type
            self.model_tester.create_and_check_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )

    def __lowerCamelCase ( self ):
        """Test the sequence-classification head."""
        (
            (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ),
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )

    def __lowerCamelCase ( self ):
        """Test the token-classification head."""
        (
            (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ),
        ) : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )

    def __lowerCamelCase ( self ):
        """Test the question-answering head."""
        (
            (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ), (
            SCREAMING_SNAKE_CASE_
            ),
        ) : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )

    @slow
    def __lowerCamelCase ( self ):
        """Smoke-test loading the first pretrained checkpoint from the hub."""
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFLayoutLMvaModel.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
def __lowerCamelCase ( ) -> Optional[Any]:
    """Load the COCO cats-on-couch fixture image used by the integration test."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test: run the pretrained microsoft/layoutlmv3-base
    checkpoint on a fixture image and compare against golden hidden states."""

    @cached_property
    def __lowerCamelCase ( self ):
        """Image processor used by the test.

        NOTE(review): ``lowercase__`` is undefined here (the original argument,
        presumably ``apply_ocr=False``, was mangled away) — this raises
        NameError when vision is available.
        """
        return LayoutLMvaImageProcessor(apply_ocr=lowercase__ ) if is_vision_available() else None

    @slow
    def __lowerCamelCase ( self ):
        """Forward a fixture image plus a 2-token text input through the base
        model and check output shape and the first 3x3 slice of hidden states."""
        SCREAMING_SNAKE_CASE_ : List[str] = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
        # NOTE(review): `default_image_processor` is not defined on this class
        # (the cached property above was mangled to __lowerCamelCase).
        SCREAMING_SNAKE_CASE_ : Any = self.default_image_processor
        SCREAMING_SNAKE_CASE_ : Optional[int] = prepare_img()
        SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processor(images=lowercase__ , return_tensors="tf" ).pixel_values
        SCREAMING_SNAKE_CASE_ : Dict = tf.constant([[1, 2]] )
        SCREAMING_SNAKE_CASE_ : Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        SCREAMING_SNAKE_CASE_ : List[Any] = model(input_ids=lowercase__ , bbox=lowercase__ , pixel_values=lowercase__ , training=lowercase__ )
        # verify the logits
        SCREAMING_SNAKE_CASE_ : Tuple = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , lowercase__ )
        SCREAMING_SNAKE_CASE_ : int = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase__ , atol=1e-4 ) )
| 68 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# Gate the real pipeline imports on torch + transformers being installed;
# fall back to dummy placeholder objects otherwise.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # NOTE(review): only two of the five public names exported by the `else`
    # branch are re-exported from the dummy module here; importers of the
    # others get an ImportError/AttributeError when the optional dependencies
    # are missing — confirm against the upstream __init__.
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
| 711 |
'''simple docstring'''
def ith_permutation(n: int, k: int) -> list[int]:
    """Return the k-th (0-indexed) permutation of ``range(n)`` in lexicographic order.

    Decomposes ``k`` in the factorial number system: each "digit" (taken from
    the most significant factorial down) selects the next element from the
    pool of remaining elements.

    Args:
        n: size of the permuted sequence (>= 0).
        k: index of the wanted permutation, ``0 <= k < n!``.

    Returns:
        The permutation as a list of ints.

    Raises:
        AssertionError: if ``k`` is out of bounds for the given ``n``.

    >>> ith_permutation(3, 0)
    [0, 1, 2]
    >>> ith_permutation(3, 5)
    [2, 1, 0]
    """
    # Degenerate sizes: the factorial bookkeeping below needs >= 2 elements.
    if n <= 1:
        assert k == 0, "k out of bounds"
        return list(range(n))
    # factorials holds 1!, 2!, ..., (n-1)! — the place values of k's digits.
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Consume the factorial digits from most significant to least; each digit
    # indexes into the shrinking pool of unused elements.
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation


# Backward-compatible alias for the original (machine-mangled) name.
__lowerCamelCase = ith_permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 68 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
# Public API of the DPT model, loaded lazily via _LazyModule so that heavy
# vision / torch submodules are only imported on first attribute access.
# (The original mangled code assigned the structure to `snake_case_` but then
# passed the undefined name `_import_structure` to _LazyModule — NameError.)
_import_structure = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor']
    _import_structure['image_processing_dpt'] = ['DPTImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_dpt'] = [
        'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DPTForDepthEstimation',
        'DPTForSemanticSegmentation',
        'DPTModel',
        'DPTPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; at runtime the lazy proxy
    # below resolves these names on demand.
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; attributes listed in
    # _import_structure are imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 712 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # CLI: convert an original Stable Diffusion ControlNet checkpoint into the
    # diffusers format. In the original mangled code `parse_bool` had been
    # de-indented to module level, which made every statement after it parse as
    # unreachable function-body code — the script never parsed its arguments
    # nor ran the conversion.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    parser.add_argument(
        '--original_config_file',
        type=str,
        required=True,
        help='The YAML config file corresponding to the original architecture.',
    )
    parser.add_argument(
        '--num_in_channels',
        default=None,
        type=int,
        help='The number of input channels. If `None` number of input channels will be automatically inferred.',
    )
    parser.add_argument(
        '--image_size',
        default=512,
        type=int,
        help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
            ' Base. Use 768 for Stable Diffusion v2.'
        ),
    )
    parser.add_argument(
        '--extract_ema',
        action='store_true',
        help=(
            'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
            ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
            ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
        ),
    )
    parser.add_argument(
        '--upcast_attention',
        action='store_true',
        help=(
            'Whether the attention computation should always be upcasted. This is necessary when running stable'
            ' diffusion 2.1.'
        ),
    )
    parser.add_argument(
        '--from_safetensors',
        action='store_true',
        help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
    )
    parser.add_argument(
        '--to_safetensors',
        action='store_true',
        help='Whether to store pipeline in safetensors format or not.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')

    def parse_bool(string):
        """argparse `type=` helper: map the literals 'True'/'False' to bools."""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(F"could not parse string as bool {string}" )

    parser.add_argument(
        '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
    )
    parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)

    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )

    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 68 | 0 |
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = "Usage of script: script_name <size_of_canvas:int>"

# A biased 0/1 pool (unused by `seed` below, kept for compatibility).
choice = [0] * 1_0_0 + [1] * 1_0
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    """Return a size x size grid with every cell dead (False)."""
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    """Set every cell of *canvas* in place to a uniformly random state."""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advance Conway's Game of Life by one generation.

    Args:
        canvas: square boolean grid; True means alive.

    Returns:
        The next generation as a new list of lists (the input is not mutated).
    """
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            # NOTE: for border cells the r-1 / c-1 slice starts at -1, which
            # numpy interprets as "from the end", yielding an empty
            # neighbourhood — border cells therefore always die.
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )
    return next_gen_canvas.tolist()


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    """Apply Conway's rules to one cell given its 3x3 neighbourhood.

    *neighbours* includes the centre cell itself; it is subtracted from the
    counts before the rules are applied.
    """
    dead = 0
    alive = 0
    # Count dead and alive cells in the neighbourhood.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # Remove the duplicate entry for the focus point itself.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # Conway's rules.
    state = pt
    if pt:
        if alive < 2:
            state = False  # underpopulation
        elif alive == 2 or alive == 3:
            state = True  # survival
        elif alive > 3:
            state = False  # overpopulation
    else:
        if alive == 3:
            state = True  # reproduction
    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['w', 'k'])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 713 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config files.
# NOTE(review): this dict rebinds the same mangled name as the logger above,
# overwriting the logger binding.
snake_case_ = {
    'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
    'umberto-commoncrawl-cased-v1': (
        'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
    ),
    'umberto-wikipedia-uncased-v1': (
        'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
    ),
}
class SCREAMING_SNAKE_CASE__ (PretrainedConfig):
    """Configuration for CamemBERT models (a RoBERTa-style architecture).

    The constructor parameter names were reconstructed from the attribute
    assignments of the original machine-mangled code, in which every
    parameter was named ``lowercase__`` (duplicate argument names — a
    SyntaxError) and the base class name was undefined.
    """

    # Model identifier used by AutoConfig (was mangled to `_A`).
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store the transformer hyper-parameters; special-token ids are
        forwarded to the PretrainedConfig base class."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class SCREAMING_SNAKE_CASE__ (OnnxConfig):
    """ONNX export configuration for CamemBERT.

    NOTE(review): in the mangled original the property was named
    ``__lowerCamelCase`` (name-mangled inside the class and therefore
    unreachable) and the base class name was undefined; the OnnxConfig
    contract expects the property to be named ``inputs``.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Ordered mapping of model input names to their dynamic axes."""
        # Multiple-choice inputs carry an extra `choice` dimension.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 68 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Public API of the Swin model, loaded lazily via _LazyModule so that heavy
# torch / tensorflow submodules are only imported on first attribute access.
# (The original mangled code assigned the structure to `snake_case_` but then
# passed the undefined name `_import_structure` to _LazyModule — NameError.)
_import_structure = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_swin'] = [
        'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SwinForImageClassification',
        'SwinForMaskedImageModeling',
        'SwinModel',
        'SwinPreTrainedModel',
        'SwinBackbone',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_swin'] = [
        'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFSwinForImageClassification',
        'TFSwinForMaskedImageModeling',
        'TFSwinModel',
        'TFSwinPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; at runtime the lazy proxy
    # below resolves these names on demand.
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; attributes listed in
    # _import_structure are imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 714 |
'''simple docstring'''
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort *numbers* in place using exchange sort and return the list.

    For each position i, every later element smaller than numbers[i] is
    swapped forward — O(n^2), stable enough for small inputs.

    Args:
        numbers: list of comparable values (mutated in place).

    Returns:
        The same list object, sorted ascending.

    >>> exchange_sort([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    """
    # The original (machine-mangled) body referenced the undefined name
    # `numbers`, and the __main__ block below called the undefined name
    # `exchange_sort`; both are restored here.
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(exchange_sort(unsorted))
| 68 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config files.
# NOTE(review): this dict rebinds the same mangled name as the logger above,
# overwriting the logger binding.
snake_case_ = {
    'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    """Configuration for a CvT (Convolutional vision Transformer) model.

    Every list-valued argument holds one value per stage of the network
    (three stages by default).  The defaults describe the ``microsoft/cvt-13``
    architecture.

    NOTE(review): the obfuscated source declared every ``__init__`` parameter
    with the same name (a syntax error), called ``super().__init__`` with an
    undefined variable, and inherited from an undefined base class.  Parameter
    names are restored from the attribute assignments in the body; the base
    class from the ``PretrainedConfig`` import at the top of the file.
    """

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        """Store the per-stage architecture hyper-parameters.

        The mutable list defaults are never modified in this class, so the
        shared-default pitfall does not apply here.
        """
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 715 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)

# Root logger; a stdout handler is attached further down so test output is
# captured.  NOTE(review): the original bound this to an obfuscated name
# while later code calls `logger.addHandler(...)`.
logger = logging.getLogger()
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Path , SCREAMING_SNAKE_CASE_ : list ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "\n".join(SCREAMING_SNAKE_CASE_ )
Path(SCREAMING_SNAKE_CASE_ ).open("w" ).writelines(SCREAMING_SNAKE_CASE_ )
# Tiny checkpoints used for fast smoke tests.  NOTE(review): the obfuscated
# source bound all three to one reused name while the tests below reference
# T5_TINY / BART_TINY / MBART_TINY; mapping restored from the checkpoint ids.
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    """End-to-end smoke tests for ``run_eval.py`` / ``run_eval_search.py``.

    NOTE(review): the base class, method names and local names were obfuscated
    to undefined placeholders; they are restored from the call sites visible in
    this file (``self.run_eval_tester``, ``_dump_articles``, ``T5_TINY``,
    ``ROUGE_KEYS``) and from the upstream transformers seq2seq example tests —
    verify against that file.
    """

    def run_eval_tester(self, model):
        """Run the eval script for ``model`` on a one-article input and check
        that the output file is produced."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        # T5 checkpoints are exercised as translation, everything else as summarization.
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n ".split()
        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    # Test one model quickly (not @slow) to catch simple problems; extensive
    # multi-model coverage is done in the @slow test below.
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    # Any extra models should go into this list — can be slow.
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # Test with two models to validate: 1. translation (t5), 2. summarization (mbart).
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        """Run the hyper-parameter search wrapper and check its report output."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"\n run_eval_search.py\n {model}\n {str(input_file_name)}\n {str(output_file_name)}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n ".split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])
        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 68 | 0 |
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    """Fixture SageMaker launch config with canned training-script argument lists.

    NOTE(review): the class name is restored from its call sites below
    (``MockLaunchConfig.success_training_script_args``) and the base class
    from the ``SageMakerConfig`` import; the scalar field names were all
    collapsed to one placeholder by obfuscation and are reconstructed from
    the upstream accelerate test — verify against ``SageMakerConfig``'s
    declared fields.
    """

    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = 'ml.p3.2xlarge'
    iam_role_name = 'accelerate_sagemaker_execution_role'
    profile = 'hf-sm'
    region = 'us-east-1'
    num_machines = 1
    base_job_name = 'accelerate-sagemaker-1'
    pytorch_version = '1.6'
    transformers_version = '4.4'
    training_script = 'train.py'

    # Well-formed args: every flag has an explicit value, so parsing succeeds.
    success_training_script_args = [
        '--model_name_or_path',
        'bert',
        '--do_train',
        'False',
        '--epochs',
        '3',
        '--learning_rate',
        '5e-5',
        '--max_steps',
        '50.5',
    ]
    # Mixed form: some boolean flags appear without values — parsing must fail.
    fail_training_script_args = [
        '--model_name_or_path',
        'bert',
        '--do_train',
        '--do_test',
        'False',
        '--do_predict',
        '--epochs',
        '3',
        '--learning_rate',
        '5e-5',
        '--max_steps',
        '50.5',
    ]
class SageMakerLaunch(unittest.TestCase):
    """Unit test for ``accelerate.utils.launch._convert_nargs_to_dict``.

    NOTE(review): the local variable name is restored from its own read site
    (``converted_args`` at the first isinstance check); the expected types are
    reconstructed from the literal argument values in ``MockLaunchConfig``
    ('bert' -> str, 'False' -> bool, '3' -> int, '5e-5'/'50.5' -> float), and
    the expected exception is assumed to be ``ValueError`` — verify against
    ``_convert_nargs_to_dict``'s implementation.
    """

    def test_args_convert(self):
        # Values must be coerced to their natural Python types.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        # Mixing valued and bare boolean flags is rejected.
        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 716 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[np.float64],
    constant_matrix: NDArray[np.float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Approximate the solution of A·x = b with the Jacobi iteration method.

    NOTE(review): the obfuscated source declared all four parameters with one
    shared name and collapsed the two shape unpackings onto the same pair of
    variables; names are restored from the error messages and body reads.  The
    final list comprehension also converted the wrong variable — fixed to
    convert each result element.

    :param coefficient_matrix: square (n x n) matrix A; must be strictly
        diagonally dominant (checked below) for the iteration to converge.
    :param constant_matrix: column vector b of shape (n, 1).
    :param init_val: n initial guesses for the solution vector.
    :param iterations: number of Jacobi sweeps to perform; must be >= 1.
    :return: the approximate solution after ``iterations`` sweeps.
    :raises ValueError: on a dimension mismatch, a non-positive iteration
        count, or a coefficient matrix that fails the dominance check.
    """
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    # Augmented matrix [A | b]; the last column holds the constants.
    table: NDArray[np.float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for the given number of sweeps.
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]  # diagonal entry a_ii
                elif col == cols - 1:
                    val = table[row][col]  # constant term b_i
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[np.float64]) -> bool:
    """Raise ``ValueError`` unless each diagonal entry of the augmented matrix
    strictly exceeds the sum of the other coefficients in its row.

    NOTE(review): restored this name from the call in
    ``jacobi_iteration_method`` (the obfuscated source gave both functions the
    same name, so the second shadowed the first).  The off-diagonal sum does
    not take absolute values, so this matches the textbook definition only for
    non-negative coefficients.

    :param table: augmented matrix [A | b]; the last column is ignored.
    :return: ``True`` when the check passes.
    """
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
    import doctest

    # Run the doctests embedded in this module's docstrings.
    doctest.testmod()
| 68 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.